Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-04 08:10:25 +00:00)
Compare commits
641 Commits
```diff
@@ -5,10 +5,10 @@ services:
       dockerfile: libs/langchain/dev.Dockerfile
       context: ..
     volumes:
-      # Update this to wherever you want VS Code to mount the folder of your project
+      # Update this to wherever you want VS Code to mount the folder of your project
       - ..:/workspaces/langchain:cached
     networks:
-      - langchain-network
+      - langchain-network
 #    environment:
 #      MONGO_ROOT_USERNAME: root
 #      MONGO_ROOT_PASSWORD: example123
@@ -28,5 +28,3 @@ services:
 networks:
   langchain-network:
     driver: bridge
-
-
```
.github/ISSUE_TEMPLATE/config.yml: 3 lines changed (vendored)

```diff
@@ -4,9 +4,6 @@ contact_links:
   - name: 🤔 Question or Problem
     about: Ask a question or ask about a problem in GitHub Discussions.
     url: https://www.github.com/langchain-ai/langchain/discussions/categories/q-a
-  - name: Discord
-    url: https://discord.gg/6adMQxSpJS
-    about: General community discussions
   - name: Feature Request
     url: https://www.github.com/langchain-ai/langchain/discussions/categories/ideas
     about: Suggest a feature or an idea
```
.github/actions/people/app/main.py: 39 lines changed (vendored)

```diff
@@ -350,11 +350,7 @@ def get_graphql_pr_edges(*, settings: Settings, after: Union[str, None] = None):
         print("Querying PRs...")
     else:
         print(f"Querying PRs with cursor {after}...")
-    data = get_graphql_response(
-        settings=settings,
-        query=prs_query,
-        after=after
-    )
+    data = get_graphql_response(settings=settings, query=prs_query, after=after)
     graphql_response = PRsResponse.model_validate(data)
     return graphql_response.data.repository.pullRequests.edges
 
@@ -484,10 +480,16 @@ def get_contributors(settings: Settings):
         lines_changed = pr.additions + pr.deletions
         score = _logistic(files_changed, 20) + _logistic(lines_changed, 100)
         contributor_scores[pr.author.login] += score
-        three_months_ago = (datetime.now(timezone.utc) - timedelta(days=3*30))
+        three_months_ago = datetime.now(timezone.utc) - timedelta(days=3 * 30)
         if pr.createdAt > three_months_ago:
             recent_contributor_scores[pr.author.login] += score
-    return contributors, contributor_scores, recent_contributor_scores, reviewers, authors
+    return (
+        contributors,
+        contributor_scores,
+        recent_contributor_scores,
+        reviewers,
+        authors,
+    )
 
 
 def get_top_users(
@@ -524,9 +526,13 @@ if __name__ == "__main__":
     # question_commentors, question_last_month_commentors, question_authors = get_experts(
     #     settings=settings
     # )
-    contributors, contributor_scores, recent_contributor_scores, reviewers, pr_authors = get_contributors(
-        settings=settings
-    )
+    (
+        contributors,
+        contributor_scores,
+        recent_contributor_scores,
+        reviewers,
+        pr_authors,
+    ) = get_contributors(settings=settings)
     # authors = {**question_authors, **pr_authors}
     authors = {**pr_authors}
     maintainers_logins = {
@@ -547,6 +553,7 @@ if __name__ == "__main__":
         "obi1kenobi",
         "langchain-infra",
         "jacoblee93",
+        "isahers1",
         "dqbd",
         "bracesproul",
         "akira",
@@ -558,7 +565,7 @@ if __name__ == "__main__":
         maintainers.append(
             {
                 "login": login,
-                "count": contributors[login], #+ question_commentors[login],
+                "count": contributors[login],  # + question_commentors[login],
                 "avatarUrl": user.avatarUrl,
                 "twitterUsername": user.twitterUsername,
                 "url": user.url,
@@ -614,9 +621,7 @@ if __name__ == "__main__":
     new_people_content = yaml.dump(
         people, sort_keys=False, width=200, allow_unicode=True
     )
-    if (
-        people_old_content == new_people_content
-    ):
+    if people_old_content == new_people_content:
         logging.info("The LangChain People data hasn't changed, finishing.")
         sys.exit(0)
     people_path.write_text(new_people_content, encoding="utf-8")
@@ -629,9 +634,7 @@ if __name__ == "__main__":
     logging.info(f"Creating a new branch {branch_name}")
     subprocess.run(["git", "checkout", "-B", branch_name], check=True)
     logging.info("Adding updated file")
-    subprocess.run(
-        ["git", "add", str(people_path)], check=True
-    )
+    subprocess.run(["git", "add", str(people_path)], check=True)
     logging.info("Committing updated file")
     message = "👥 Update LangChain people data"
     result = subprocess.run(["git", "commit", "-m", message], check=True)
@@ -640,4 +643,4 @@ if __name__ == "__main__":
     logging.info("Creating PR")
     pr = repo.create_pull(title=message, body=message, base="master", head=branch_name)
     logging.info(f"Created PR: {pr.number}")
-    logging.info("Finished")
+    logging.info("Finished")
```
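The contributor score above combines two saturating terms, `_logistic(files_changed, 20)` and `_logistic(lines_changed, 100)`, so that one enormous PR cannot dominate the ranking. The body of `_logistic` is not part of this diff; the sketch below is a hypothetical stand-in that assumes a standard logistic curve centered at the given midpoint, which is consistent with how the call sites use it.

```python
import math


def _logistic(x: float, midpoint: float) -> float:
    # Hypothetical stand-in for the _logistic helper in main.py (its body
    # is not shown in this diff): a logistic curve centered at `midpoint`,
    # saturating toward 1 for very large inputs.
    return 1 / (1 + math.exp(-(x - midpoint) / midpoint))


# A PR touching 20 files and 100 lines sits at both midpoints, scoring
# 0.5 + 0.5 = 1.0; a PR ten times larger scores only ~2.0, not ~10.
print(_logistic(20, 20) + _logistic(100, 100))    # 1.0
print(_logistic(200, 20) + _logistic(1000, 100))  # ~2.0
```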
.github/scripts/check_diff.py: 150 lines changed (vendored)

```diff
@@ -1,7 +1,13 @@
+import glob
 import json
-import sys
 import os
-from typing import Dict
+import re
+import sys
+import tomllib
+from collections import defaultdict
+from typing import Dict, List, Set
+from pathlib import Path
 
 
 LANGCHAIN_DIRS = [
     "libs/core",
@@ -11,6 +17,112 @@ LANGCHAIN_DIRS = [
     "libs/experimental",
 ]
 
 
+def all_package_dirs() -> Set[str]:
+    return {
+        "/".join(path.split("/")[:-1]).lstrip("./")
+        for path in glob.glob("./libs/**/pyproject.toml", recursive=True)
+        if "libs/cli" not in path and "libs/standard-tests" not in path
+    }
+
+
+def dependents_graph() -> dict:
+    """
+    Construct a mapping of package -> dependents, such that we can
+    run tests on all dependents of a package when a change is made.
+    """
+    dependents = defaultdict(set)
+
+    for path in glob.glob("./libs/**/pyproject.toml", recursive=True):
+        if "template" in path:
+            continue
+
+        # load regular and test deps from pyproject.toml
+        with open(path, "rb") as f:
+            pyproject = tomllib.load(f)["tool"]["poetry"]
+
+        pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
+        for dep in [
+            *pyproject["dependencies"].keys(),
+            *pyproject["group"]["test"]["dependencies"].keys(),
+        ]:
+            if "langchain" in dep:
+                dependents[dep].add(pkg_dir)
+                continue
+
+        # load extended deps from extended_testing_deps.txt
+        package_path = Path(path).parent
+        extended_requirement_path = package_path / "extended_testing_deps.txt"
+        if extended_requirement_path.exists():
+            with open(extended_requirement_path, "r") as f:
+                extended_deps = f.read().splitlines()
+                for depline in extended_deps:
+                    if depline.startswith("-e "):
+                        # editable dependency
+                        assert depline.startswith(
+                            "-e ../partners/"
+                        ), "Extended test deps should only editable install partner packages"
+                        partner = depline.split("partners/")[1]
+                        dep = f"langchain-{partner}"
+                    else:
+                        dep = depline.split("==")[0]
+
+                    if "langchain" in dep:
+                        dependents[dep].add(pkg_dir)
+    return dependents
+
+
+def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
+    updated = set()
+    for dir_ in dirs_to_eval:
+        # handle core manually because it has so many dependents
+        if "core" in dir_:
+            updated.add(dir_)
+            continue
+        pkg = "langchain-" + dir_.split("/")[-1]
+        updated.update(dependents[pkg])
+        updated.add(dir_)
+    return list(updated)
+
+
+def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
+    min_python = "3.8"
+    max_python = "3.12"
+
+    # custom logic for specific directories
+    if dir_ == "libs/partners/milvus":
+        # milvus poetry doesn't allow 3.12 because they
+        # declare deps in funny way
+        max_python = "3.11"
+
+    return [
+        {"working-directory": dir_, "python-version": min_python},
+        {"working-directory": dir_, "python-version": max_python},
+    ]
+
+
+def _get_configs_for_multi_dirs(
+    job: str, dirs_to_run: List[str], dependents: dict
+) -> List[Dict[str, str]]:
+    if job == "lint":
+        dirs = add_dependents(
+            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
+            dependents,
+        )
+    elif job in ["test", "compile-integration-tests", "dependencies"]:
+        dirs = add_dependents(
+            dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
+        )
+    elif job == "extended-tests":
+        dirs = list(dirs_to_run["extended-test"])
+    else:
+        raise ValueError(f"Unknown job: {job}")
+
+    return [
+        config for dir_ in dirs for config in _get_configs_for_single_dir(job, dir_)
+    ]
+
+
 if __name__ == "__main__":
     files = sys.argv[1:]
 
@@ -21,10 +133,11 @@ if __name__ == "__main__":
     }
     docs_edited = False
 
-    if len(files) == 300:
+    if len(files) >= 300:
         # max diff length is 300 files - there are likely files missing
+        raise ValueError("Max diff reached. Please manually run CI on changed libs.")
 
         dirs_to_run["lint"] = all_package_dirs()
         dirs_to_run["test"] = all_package_dirs()
         dirs_to_run["extended-test"] = set(LANGCHAIN_DIRS)
     for file in files:
         if any(
             file.startswith(dir_)
@@ -81,14 +194,25 @@ if __name__ == "__main__":
             docs_edited = True
         dirs_to_run["lint"].add(".")
 
-    outputs = {
-        "dirs-to-lint": list(
-            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"]
-        ),
-        "dirs-to-test": list(dirs_to_run["test"] | dirs_to_run["extended-test"]),
-        "dirs-to-extended-test": list(dirs_to_run["extended-test"]),
-        "docs-edited": "true" if docs_edited else "",
-    }
+    dependents = dependents_graph()
+
+    # we now have dirs_by_job
+    # todo: clean this up
+
+    map_job_to_configs = {
+        job: _get_configs_for_multi_dirs(job, dirs_to_run, dependents)
+        for job in [
+            "lint",
+            "test",
+            "extended-tests",
+            "compile-integration-tests",
+            "dependencies",
+        ]
+    }
+    map_job_to_configs["test-doc-imports"] = (
+        [{"python-version": "3.12"}] if docs_edited else []
+    )
 
-    for key, value in outputs.items():
+    for key, value in map_job_to_configs.items():
         json_output = json.dumps(value)
         print(f"{key}={json_output}")
```
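The heart of the new `check_diff.py` is the pairing of `dependents_graph()` with `add_dependents()`: when a package changes, every package that depends on it is pulled into the test set as well, with `core` special-cased because nearly everything depends on it. A self-contained toy illustration of that composition, using a made-up two-entry graph instead of the real pyproject.toml scan:

```python
from collections import defaultdict
from typing import List, Set

# Made-up dependents graph; the real one is built by scanning every
# libs/**/pyproject.toml for langchain-* dependencies.
dependents = defaultdict(set)
dependents["langchain-text-splitters"] = {"libs/langchain", "libs/community"}


def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
    updated = set()
    for dir_ in dirs_to_eval:
        if "core" in dir_:  # core has too many dependents; handled manually
            updated.add(dir_)
            continue
        pkg = "langchain-" + dir_.split("/")[-1]
        updated.update(dependents[pkg])
        updated.add(dir_)
    return list(updated)


# A change in libs/text-splitters also schedules its two dependents.
print(sorted(add_dependents({"libs/text-splitters"}, dependents)))
# ['libs/community', 'libs/langchain', 'libs/text-splitters']
```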
.github/scripts/check_prerelease_dependencies.py: 35 lines (new file, vendored)

```diff
@@ -0,0 +1,35 @@
+import sys
+import tomllib
+
+if __name__ == "__main__":
+    # Get the TOML file path from the command line argument
+    toml_file = sys.argv[1]
+
+    # read toml file
+    with open(toml_file, "rb") as file:
+        toml_data = tomllib.load(file)
+
+    # see if we're releasing an rc
+    version = toml_data["tool"]["poetry"]["version"]
+    releasing_rc = "rc" in version
+
+    # if not, iterate through dependencies and make sure none allow prereleases
+    if not releasing_rc:
+        dependencies = toml_data["tool"]["poetry"]["dependencies"]
+        for lib in dependencies:
+            dep_version = dependencies[lib]
+            dep_version_string = (
+                dep_version["version"] if isinstance(dep_version, dict) else dep_version
+            )
+
+            if "rc" in dep_version_string:
+                raise ValueError(
+                    f"Dependency {lib} has a prerelease version. Please remove this."
+                )
+
+            if isinstance(dep_version, dict) and dep_version.get(
+                "allow-prereleases", False
+            ):
+                raise ValueError(
+                    f"Dependency {lib} has allow-prereleases set to true. Please remove this."
+                )
```
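The rule this script enforces is easy to state: unless the package itself is being released as an `rc`, no dependency may pin a prerelease version or set `allow-prereleases`. A minimal, file-free reproduction of that rule against an in-memory pyproject structure (the values below are illustrative, not from the repo):

```python
# Illustrative pyproject data; a real run parses pyproject.toml with tomllib.
toml_data = {
    "tool": {
        "poetry": {
            "version": "0.2.0",  # not an rc, so prerelease deps are forbidden
            "dependencies": {
                "langchain-core": {"version": "0.2.0rc1", "allow-prereleases": True},
                "requests": "^2.31.0",
            },
        }
    }
}

if "rc" not in toml_data["tool"]["poetry"]["version"]:
    for lib, dep in toml_data["tool"]["poetry"]["dependencies"].items():
        dep_version = dep["version"] if isinstance(dep, dict) else dep
        if "rc" in dep_version:
            print(f"release would fail: {lib} pins prerelease {dep_version}")
        if isinstance(dep, dict) and dep.get("allow-prereleases", False):
            print(f"release would fail: {lib} sets allow-prereleases")
# release would fail: langchain-core pins prerelease 0.2.0rc1
# release would fail: langchain-core sets allow-prereleases
```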
.github/scripts/get_min_versions.py: 5 lines changed (vendored)

```diff
@@ -9,6 +9,7 @@ MIN_VERSION_LIBS = [
     "langchain-community",
     "langchain",
     "langchain-text-splitters",
+    "SQLAlchemy",
 ]
 
 
@@ -74,6 +75,4 @@ if __name__ == "__main__":
     # Call the function to get the minimum versions
     min_versions = get_min_version_from_toml(toml_file)
 
-    print(
-        " ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
-    )
+    print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
```
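The body of `get_min_version_from_toml` is not shown in this diff, but its job is to turn each Poetry constraint for the libraries in `MIN_VERSION_LIBS` into the lowest version it admits, so CI can install and test against exact minimums. A hypothetical sketch of that extraction for the common constraint shapes:

```python
import re


def get_min_version(constraint: str) -> str:
    # Hypothetical helper (not the repo's implementation): pull the lower
    # bound out of Poetry-style constraints like "^0.2.0", "~1.4", or ">=1.4,<3".
    match = re.match(r"[\^~>=\s]*(\d+(?:\.\d+)*)", constraint)
    if match is None:
        raise ValueError(f"Unrecognized constraint: {constraint}")
    return match.group(1)


print(get_min_version("^0.2.0"))    # 0.2.0
print(get_min_version(">=1.4,<3"))  # 1.4
# CI then installs e.g. "SQLAlchemy==<minimum>" to test the floor, which
# is why SQLAlchemy was added to MIN_VERSION_LIBS above.
```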
.github/workflows/_compile_integration_test.yml: 18 lines changed (vendored)

```diff
@@ -7,6 +7,10 @@ on:
       required: true
       type: string
       description: "From which folder this pipeline executes"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -17,22 +21,14 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
-    name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
+    name: "poetry run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: compile-integration
```
.github/workflows/_dependencies.yml: 18 lines changed (vendored)

```diff
@@ -11,6 +11,10 @@ on:
       required: false
       type: string
       description: "Relative path to the langchain library folder"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -21,22 +25,14 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
-    name: dependency checks ${{ matrix.python-version }}
+    name: dependency checks ${{ inputs.python-version }}
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: pydantic-cross-compat
```
.github/workflows/_integration_test.yml: 23 lines changed (vendored)

```diff
@@ -6,30 +6,28 @@ on:
     working-directory:
       required: true
       type: string
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
 
 jobs:
   build:
     environment: Scheduled testing
     defaults:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.11"
-    name: Python ${{ matrix.python-version }}
+    name: Python ${{ inputs.python-version }}
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: ${{ inputs.working-directory }}
           cache-key: core
@@ -53,8 +51,15 @@ jobs:
         shell: bash
         env:
           AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
           FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
           GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
           ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
+          AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
+          AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
+          AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
+          AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
+          AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
           MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
           TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
```
.github/workflows/_lint.yml: 26 lines changed (vendored)

```diff
@@ -11,6 +11,10 @@ on:
       required: false
       type: string
       description: "Relative path to the langchain library folder"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -21,27 +25,15 @@ env:
 
 jobs:
   build:
-    name: "make lint #${{ matrix.python-version }}"
+    name: "make lint #${{ inputs.python-version }}"
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        # Only lint on the min and max supported Python versions.
-        # It's extremely unlikely that there's a lint issue on any version in between
-        # that doesn't show up on the min or max versions.
-        #
-        # GitHub rate-limits how many jobs can be running at any one time.
-        # Starting new jobs is also relatively slow,
-        # so linting on fewer versions makes CI faster.
-        python-version:
-          - "3.8"
-          - "3.12"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: lint-with-extras
@@ -86,7 +78,7 @@ jobs:
        with:
          path: |
            ${{ env.WORKDIR }}/.mypy_cache
-          key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
+          key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
 
 
      - name: Analysing the code with our lint
@@ -120,7 +112,7 @@ jobs:
        with:
          path: |
            ${{ env.WORKDIR }}/.mypy_cache_test
-          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
+          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
 
      - name: Analysing the code with our lint
        working-directory: ${{ inputs.working-directory }}
```
.github/workflows/_release.yml: 15 lines changed (vendored)

```diff
@@ -122,7 +122,6 @@ jobs:
           fi
           {
             echo 'release-body<<EOF'
-            echo "# Release $TAG"
             echo $PREAMBLE
             echo
             git log --format="%s" "$PREV_TAG"..HEAD -- $WORKING_DIR
@@ -135,6 +134,7 @@ jobs:
       - release-notes
     uses:
       ./.github/workflows/_test_release.yml
+    permissions: write-all
     with:
       working-directory: ${{ inputs.working-directory }}
       dangerous-nonmaster-release: ${{ inputs.dangerous-nonmaster-release }}
@@ -189,7 +189,7 @@ jobs:
               --extra-index-url https://test.pypi.org/simple/ \
               "$PKG_NAME==$VERSION" || \
             ( \
-              sleep 5 && \
+              sleep 15 && \
               poetry run pip install \
                 --extra-index-url https://test.pypi.org/simple/ \
                 "$PKG_NAME==$VERSION" \
@@ -202,7 +202,7 @@ jobs:
           poetry run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))"
 
       - name: Import test dependencies
-        run: poetry install --with test,test_integration
+        run: poetry install --with test
         working-directory: ${{ inputs.working-directory }}
 
       # Overwrite the local version of the package with the test PyPI version.
@@ -221,6 +221,11 @@ jobs:
         run: make tests
         working-directory: ${{ inputs.working-directory }}
 
+      - name: Check for prerelease versions
+        working-directory: ${{ inputs.working-directory }}
+        run: |
+          poetry run python $GITHUB_WORKSPACE/.github/scripts/check_prerelease_dependencies.py pyproject.toml
+
       - name: Get minimum versions
         working-directory: ${{ inputs.working-directory }}
         id: min-version
@@ -245,6 +250,10 @@ jobs:
         with:
           credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
 
+      - name: Import integration test dependencies
+        run: poetry install --with test,test_integration
+        working-directory: ${{ inputs.working-directory }}
+
       - name: Run integration tests
         if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
         env:
```
.github/workflows/_test.yml: 18 lines changed (vendored)

```diff
@@ -11,6 +11,10 @@ on:
       required: false
       type: string
       description: "Relative path to the langchain library folder"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -21,22 +25,14 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
-    name: "make test #${{ matrix.python-version }}"
+    name: "make test #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: core
```
.github/workflows/_test_doc_imports.yml: 15 lines changed (vendored)

```diff
@@ -2,6 +2,11 @@ name: test_doc_imports
 
 on:
   workflow_call:
+    inputs:
+      python-version:
+        required: true
+        type: string
+        description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -9,18 +14,14 @@ env:
 jobs:
   build:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.12"
-    name: "check doc imports #${{ matrix.python-version }}"
+    name: "check doc imports #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          cache-key: core
```
.github/workflows/check_diffs.yml: 85 lines changed (vendored)

```diff
@@ -26,98 +26,103 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - id: files
         uses: Ana06/get-changed-files@v2.2.0
       - id: set-matrix
         run: |
           python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
     outputs:
-      dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
-      dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
-      dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
-      docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
+      lint: ${{ steps.set-matrix.outputs.lint }}
+      test: ${{ steps.set-matrix.outputs.test }}
+      extended-tests: ${{ steps.set-matrix.outputs.extended-tests }}
+      compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
+      dependencies: ${{ steps.set-matrix.outputs.dependencies }}
+      test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
   lint:
-    name: cd ${{ matrix.working-directory }}
+    name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-lint != '[]' }}
+    if: ${{ needs.build.outputs.lint != '[]' }}
     strategy:
       matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-lint) }}
+        job-configs: ${{ fromJson(needs.build.outputs.lint) }}
     uses: ./.github/workflows/_lint.yml
     with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   test:
-    name: cd ${{ matrix.working-directory }}
+    name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
+    if: ${{ needs.build.outputs.test != '[]' }}
     strategy:
       matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
+        job-configs: ${{ fromJson(needs.build.outputs.test) }}
     uses: ./.github/workflows/_test.yml
     with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   test-doc-imports:
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
-    uses: ./.github/workflows/_test_doc_imports.yml
-    secrets: inherit
-
-  compile-integration-tests:
-    name: cd ${{ matrix.working-directory }}
-    needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
+    if: ${{ needs.build.outputs.test-doc-imports != '[]' }}
     strategy:
       matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
+        job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
+    uses: ./.github/workflows/_test_doc_imports.yml
+    secrets: inherit
+    with:
+      python-version: ${{ matrix.job-configs.python-version }}
+
+  compile-integration-tests:
+    name: cd ${{ matrix.job-configs.working-directory }}
+    needs: [ build ]
+    if: ${{ needs.build.outputs.compile-integration-tests != '[]' }}
+    strategy:
+      matrix:
+        job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
     uses: ./.github/workflows/_compile_integration_test.yml
     with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   dependencies:
-    name: cd ${{ matrix.working-directory }}
+    name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
+    if: ${{ needs.build.outputs.dependencies != '[]' }}
     strategy:
       matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
+        job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
     uses: ./.github/workflows/_dependencies.yml
    with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   extended-tests:
-    name: "cd ${{ matrix.working-directory }} / make extended_tests #${{ matrix.python-version }}"
+    name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-extended-test != '[]' }}
+    if: ${{ needs.build.outputs.extended-tests != '[]' }}
     strategy:
       matrix:
-        # note different variable for extended test dirs
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-extended-test) }}
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
+        job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
     runs-on: ubuntu-latest
     defaults:
       run:
-        working-directory: ${{ matrix.working-directory }}
+        working-directory: ${{ matrix.job-configs.working-directory }}
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ matrix.job-configs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ matrix.job-configs.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: ${{ matrix.working-directory }}
+          working-directory: ${{ matrix.job-configs.working-directory }}
          cache-key: extended
 
      - name: Install dependencies
```
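The contract between `check_diff.py` and this workflow is a set of `<job>=<json>` lines appended to `$GITHUB_OUTPUT`: each value is a JSON list of job configs, and each downstream job feeds it through `fromJson(needs.build.outputs.<job>)` into its matrix, skipping entirely when the list is `[]`. A small sketch of the producing side (the config values here are illustrative):

```python
import json

# Illustrative configs; the real ones come from _get_configs_for_multi_dirs.
map_job_to_configs = {
    "lint": [
        {"working-directory": "libs/core", "python-version": "3.8"},
        {"working-directory": "libs/core", "python-version": "3.12"},
    ],
    "extended-tests": [],  # empty list -> the `!= '[]'` guard skips the job
}

# One `key=json` line per job, in the format GITHUB_OUTPUT expects.
for key, value in map_job_to_configs.items():
    print(f"{key}={json.dumps(value)}")
# lint=[{"working-directory": "libs/core", "python-version": "3.8"}, ...]
# extended-tests=[]
```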
.github/workflows/check_new_docs.yml: 5 lines changed (vendored)

```diff
@@ -26,6 +26,11 @@ jobs:
           python-version: '3.10'
       - id: files
         uses: Ana06/get-changed-files@v2.2.0
+        with:
+          filter: |
+            *.ipynb
+            *.md
+            *.mdx
       - name: Check new docs
         run: |
           python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}
```
.github/workflows/people.yml: 1 line changed (vendored)

```diff
@@ -16,6 +16,7 @@ jobs:
   langchain-people:
     if: github.repository_owner == 'langchain-ai'
     runs-on: ubuntu-latest
+    permissions: write-all
     steps:
       - name: Dump GitHub context
         env:
```
.github/workflows/scheduled_test.yml: 18 lines changed (vendored)

```diff
@@ -27,11 +27,9 @@ jobs:
           - "libs/partners/groq"
           - "libs/partners/mistralai"
           - "libs/partners/together"
-          - "libs/partners/cohere"
           - "libs/partners/google-vertexai"
           - "libs/partners/google-genai"
           - "libs/partners/aws"
-          - "libs/partners/nvidia-ai-endpoints"
 
     steps:
       - uses: actions/checkout@v4
@@ -41,14 +39,6 @@ jobs:
         with:
           repository: langchain-ai/langchain-google
           path: langchain-google
-      - uses: actions/checkout@v4
-        with:
-          repository: langchain-ai/langchain-nvidia
-          path: langchain-nvidia
-      - uses: actions/checkout@v4
-        with:
-          repository: langchain-ai/langchain-cohere
-          path: langchain-cohere
       - uses: actions/checkout@v4
         with:
           repository: langchain-ai/langchain-aws
@@ -58,13 +48,9 @@ jobs:
         run: |
           rm -rf \
             langchain/libs/partners/google-genai \
-            langchain/libs/partners/google-vertexai \
-            langchain/libs/partners/nvidia-ai-endpoints \
-            langchain/libs/partners/cohere
+            langchain/libs/partners/google-vertexai
           mv langchain-google/libs/genai langchain/libs/partners/google-genai
           mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
-          mv langchain-nvidia/libs/ai-endpoints langchain/libs/partners/nvidia-ai-endpoints
-          mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
           mv langchain-aws/libs/aws langchain/libs/partners/aws
 
       - name: Set up Python ${{ matrix.python-version }}
@@ -123,8 +109,6 @@ jobs:
           rm -rf \
             langchain/libs/partners/google-genai \
             langchain/libs/partners/google-vertexai \
-            langchain/libs/partners/nvidia-ai-endpoints \
-            langchain/libs/partners/cohere \
             langchain/libs/partners/aws
 
       - name: Ensure the tests did not create any additional files
```
README.md: 21 lines changed

```diff
@@ -11,7 +11,6 @@
 [](https://github.com/langchain-ai/langchain/issues)
 [](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
 [](https://codespaces.new/langchain-ai/langchain)
-[](https://discord.gg/6adMQxSpJS)
 [](https://twitter.com/langchainai)
 
 Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -38,24 +37,25 @@ conda install langchain -c conda-forge
 
 For these applications, LangChain simplifies the entire application lifecycle:
 
-- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel) and [components](https://python.langchain.com/v0.2/docs/concepts/#components). Integrate with hundreds of [third-party providers](https://python.langchain.com/v0.2/docs/integrations/platforms/).
+- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://python.langchain.com/v0.2/docs/concepts#langchain-expression-language-lcel), [components](https://python.langchain.com/v0.2/docs/concepts), and [third-party integrations](https://python.langchain.com/v0.2/docs/integrations/platforms/).
+Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
 - **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
-- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/v0.2/docs/langserve/).
+- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).
 
 ### Open-source libraries
 - **`langchain-core`**: Base abstractions and LangChain Expression Language.
 - **`langchain-community`**: Third party integrations.
   - Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
 - **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
-- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
+- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it.
 
 ### Productionization:
 - **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
 
 ### Deployment:
-- **[LangServe](https://python.langchain.com/v0.2/docs/langserve/)**: A library for deploying LangChain chains as REST APIs.
+- **[LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/)**: Turn your LangGraph applications into production-ready APIs and Assistants.
 
-[stack diagram image (link lost in rendering)]
+[stack diagram image (link lost in rendering)]
@@ -106,7 +106,7 @@ Retrieval Augmented Generation involves [loading data](https://python.langchain.
 
 **🤖 Agents**
 
-Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.
+Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents), along with [LangGraph](https://github.com/langchain-ai/langgraph) for building custom agents.
@@ -120,10 +120,9 @@ Please see [here](https://python.langchain.com) for full documentation, which in
 
 ## 🌐 Ecosystem
 
-- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
-- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
-- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
-- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.
+- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
+- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it.
+- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploy LangChain runnables and chains as REST APIs.
 
 ## 💁 Contributing
```
```diff
@@ -57,4 +57,5 @@ Notebook | Description
 [two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
 [two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
 [wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
-[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
+[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
+[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
```

cookbook/img-to_img-search_CLIP_ChromaDB.ipynb: 603 lines (new file)
File diff suppressed because one or more lines are too long

cookbook/rag-locally-on-intel-cpu.ipynb: 756 lines (new file)
@@ -0,0 +1,756 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "10f50955-be55-422f-8c62-3a32f8cf02ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RAG application running locally on Intel Xeon CPU using langchain and open-source models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "48113be6-44bb-4aac-aed3-76a1365b9561",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Author - Pratool Bharti (pratool.bharti@intel.com)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8b10b54b-1572-4ea1-9c1e-1d29fcc3dcd9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this cookbook, we use langchain tools and open source models to execute locally on CPU. This notebook has been validated to run on Intel Xeon 8480+ CPU. Here we implement a RAG pipeline for Llama2 model to answer questions about Intel Q1 2024 earnings release."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "acadbcec-3468-4926-8ce5-03b678041c0a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Create a conda or virtualenv environment with python >=3.10 and install following libraries**\n",
|
||||
"<br>\n",
|
||||
"\n",
|
||||
"`pip install --upgrade langchain langchain-community langchainhub langchain-chroma bs4 gpt4all pypdf pysqlite3-binary` <br>\n",
|
||||
"`pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84c392c8-700a-42ec-8e94-806597f22e43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Load pysqlite3 in sys modules since ChromaDB requires sqlite3.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "145cd491-b388-4ea7-bdc8-2f4995cac6fd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"__import__(\"pysqlite3\")\n",
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"sys.modules[\"sqlite3\"] = sys.modules.pop(\"pysqlite3\")"
|
||||
]
|
||||
},
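{
"cell_type": "markdown",
"id": "sqlite-version-check-md",
"metadata": {},
"source": [
"**Optional sanity check (a minimal sketch): confirm the aliased module reports SQLite >= 3.35.0, the minimum version Chroma requires**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "sqlite-version-check",
"metadata": {},
"outputs": [],
"source": [
"import sqlite3\n",
"\n",
"# After the alias above, sqlite3 resolves to pysqlite3's bundled SQLite;\n",
"# Chroma requires SQLite >= 3.35.0.\n",
"print(sqlite3.sqlite_version)"
]
},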
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14dde7e2-b236-49b9-b3a0-08c06410418c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Import essential components from langchain to load and split data**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "887643ba-249e-48d6-9aa7-d25087e8dfbf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders import PyPDFLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "922c0eba-8736-4de5-bd2f-3d0f00b16e43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Download Intel Q1 2024 earnings release**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "2d6a2419-5338-4188-8615-a40a65ff8019",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"--2024-07-15 15:04:43-- https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf\n",
|
||||
"Resolving proxy-dmz.intel.com (proxy-dmz.intel.com)... 10.7.211.16\n",
|
||||
"Connecting to proxy-dmz.intel.com (proxy-dmz.intel.com)|10.7.211.16|:912... connected.\n",
|
||||
"Proxy request sent, awaiting response... 200 OK\n",
|
||||
"Length: 133510 (130K) [application/pdf]\n",
|
||||
"Saving to: ‘intel_q1_2024_earnings.pdf’\n",
|
||||
"\n",
|
||||
"intel_q1_2024_earni 100%[===================>] 130.38K --.-KB/s in 0.005s \n",
|
||||
"\n",
|
||||
"2024-07-15 15:04:44 (24.6 MB/s) - ‘intel_q1_2024_earnings.pdf’ saved [133510/133510]\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!wget 'https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf' -O intel_q1_2024_earnings.pdf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e3612627-e105-453d-8a50-bbd6e39dedb5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Loading earning release pdf document through PyPDFLoader**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "cac6278e-ebad-4224-a062-bf6daca24cb0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = PyPDFLoader(\"intel_q1_2024_earnings.pdf\")\n",
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a7dca43b-1c62-41df-90c7-6ed2904f823d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Splitting entire document in several chunks with each chunk size is 500 tokens**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "4486adbe-0d0e-4685-8c08-c1774ed6e993",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "af142346-e793-4a52-9a56-63e3be416b3d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Looking at the first split of the document**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e4240fd1-898e-4bfc-a377-02c9bc25b56e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'source': 'intel_q1_2024_earnings.pdf', 'page': 0}, page_content='Intel Corporation\\n2200 Mission College Blvd.\\nSanta Clara, CA 95054-1549\\n \\nNews Release\\n Intel Reports First -Quarter 2024 Financial Results\\nNEWS SUMMARY\\n▪First-quarter revenue of $12.7 billion , up 9% year over year (YoY).\\n▪First-quarter GAAP earnings (loss) per share (EPS) attributable to Intel was $(0.09) ; non-GAAP EPS \\nattributable to Intel was $0.18 .')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"all_splits[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b88d2632-7c1b-49ef-a691-c0eb67d23e6a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**One of the major step in RAG is to convert each split of document into embeddings and store in a vector database such that searching relevant documents are efficient.** <br>\n",
|
||||
"**For that, importing Chroma vector database from langchain. Also, importing open source GPT4All for embedding models**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "9ff99dd7-9d47-4239-ba0a-d775792334ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.embeddings import GPT4AllEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b5d1f4dd-dd8d-4a20-95d1-2dbdd204375a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**In next step, we will download one of the most popular embedding model \"all-MiniLM-L6-v2\". Find more details of the model at this link https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "05db3494-5d8e-4a13-9941-26330a86f5e5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_name = \"all-MiniLM-L6-v2.gguf2.f16.gguf\"\n",
|
||||
"gpt4all_kwargs = {\"allow_download\": \"True\"}\n",
|
||||
"embeddings = GPT4AllEmbeddings(model_name=model_name, gpt4all_kwargs=gpt4all_kwargs)"
|
||||
]
|
||||
},
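{
"cell_type": "markdown",
"id": "embedding-dim-check-md",
"metadata": {},
"source": [
"**Optional sketch: embed a short string to confirm the model loads; all-MiniLM-L6-v2 produces 384-dimensional vectors**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "embedding-dim-check",
"metadata": {},
"outputs": [],
"source": [
"# Embed a sample query via the standard Embeddings interface and\n",
"# inspect the vector size (expected: 384 for all-MiniLM-L6-v2).\n",
"sample_vector = embeddings.embed_query(\"Intel quarterly revenue\")\n",
"print(len(sample_vector))"
]
},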
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4e53999e-1983-46ac-8039-2783e194c3ae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Store all the embeddings in the Chroma database**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "0922951a-9ddf-4761-973d-8e9a86f61284",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "29f94fa0-6c75-4a65-a1a3-debc75422479",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now, let's find relevant splits from the documents related to the question**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "88c8152d-ec7a-4f0b-9d86-877789407537",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"4\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question = \"What is Intel CCG revenue in Q1 2024\"\n",
|
||||
"docs = vectorstore.similarity_search(question)\n",
|
||||
"print(len(docs))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "53330c6b-cb0f-43f9-b379-2e57ac1e5335",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Look at the first retrieved document from the vector database**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "43a6d94f-b5c4-47b0-a353-2db4c3d24d9c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'page': 1, 'source': 'intel_q1_2024_earnings.pdf'}, page_content='Client Computing Group (CCG) $7.5 billion up31%\\nData Center and AI (DCAI) $3.0 billion up5%\\nNetwork and Edge (NEX) $1.4 billion down 8%\\nTotal Intel Products revenue $11.9 billion up17%\\nIntel Foundry $4.4 billion down 10%\\nAll other:\\nAltera $342 million down 58%\\nMobileye $239 million down 48%\\nOther $194 million up17%\\nTotal all other revenue $775 million down 46%\\nIntersegment eliminations $(4.4) billion\\nTotal net revenue $12.7 billion up9%\\nIntel Products Highlights')"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "64ba074f-4b36-442e-b7e2-b26d6e2815c3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Download Lllama-2 model from Huggingface and store locally** <br>\n",
|
||||
"**You can download different quantization variant of Lllama-2 model from the link below. We are using Q8 version here (7.16GB).** <br>\n",
|
||||
"https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c8dd0811-6f43-4bc6-b854-2ab377639c9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q8_0.gguf --local-dir . --local-dir-use-symlinks False"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3895b1f5-f51d-4539-abf0-af33d7ca48ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Import langchain components required to load downloaded LLMs model**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "fb087088-aa62-44c0-8356-061e9b9f1186",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_community.llms import LlamaCpp"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5a8a111e-2614-4b70-b034-85cd3e7304cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Loading the local Lllama-2 model using Llama-cpp library**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "fb917da2-c0d7-4995-b56d-26254276e0da",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"llama_model_loader: loaded meta data with 19 key-value pairs and 291 tensors from llama-2-7b-chat.Q8_0.gguf (version GGUF V2)\n",
|
||||
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
|
||||
"llama_model_loader: - kv 0: general.architecture str = llama\n",
|
||||
"llama_model_loader: - kv 1: general.name str = LLaMA v2\n",
|
||||
"llama_model_loader: - kv 2: llama.context_length u32 = 4096\n",
|
||||
"llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n",
|
||||
"llama_model_loader: - kv 4: llama.block_count u32 = 32\n",
|
||||
"llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008\n",
|
||||
"llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n",
|
||||
"llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n",
|
||||
"llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 32\n",
|
||||
"llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000001\n",
|
||||
"llama_model_loader: - kv 10: general.file_type u32 = 7\n",
|
||||
"llama_model_loader: - kv 11: tokenizer.ggml.model str = llama\n",
|
||||
"llama_model_loader: - kv 12: tokenizer.ggml.tokens arr[str,32000] = [\"<unk>\", \"<s>\", \"</s>\", \"<0x00>\", \"<...\n",
|
||||
"llama_model_loader: - kv 13: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n",
|
||||
"llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n",
|
||||
"llama_model_loader: - kv 15: tokenizer.ggml.bos_token_id u32 = 1\n",
|
||||
"llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32 = 2\n",
|
||||
"llama_model_loader: - kv 17: tokenizer.ggml.unknown_token_id u32 = 0\n",
|
||||
"llama_model_loader: - kv 18: general.quantization_version u32 = 2\n",
|
||||
"llama_model_loader: - type f32: 65 tensors\n",
|
||||
"llama_model_loader: - type q8_0: 226 tensors\n",
|
||||
"llm_load_vocab: special tokens cache size = 259\n",
|
||||
"llm_load_vocab: token to piece cache size = 0.1684 MB\n",
|
||||
"llm_load_print_meta: format = GGUF V2\n",
|
||||
"llm_load_print_meta: arch = llama\n",
|
||||
"llm_load_print_meta: vocab type = SPM\n",
|
||||
"llm_load_print_meta: n_vocab = 32000\n",
|
||||
"llm_load_print_meta: n_merges = 0\n",
|
||||
"llm_load_print_meta: vocab_only = 0\n",
|
||||
"llm_load_print_meta: n_ctx_train = 4096\n",
|
||||
"llm_load_print_meta: n_embd = 4096\n",
|
||||
"llm_load_print_meta: n_layer = 32\n",
|
||||
"llm_load_print_meta: n_head = 32\n",
|
||||
"llm_load_print_meta: n_head_kv = 32\n",
|
||||
"llm_load_print_meta: n_rot = 128\n",
|
||||
"llm_load_print_meta: n_swa = 0\n",
|
||||
"llm_load_print_meta: n_embd_head_k = 128\n",
|
||||
"llm_load_print_meta: n_embd_head_v = 128\n",
|
||||
"llm_load_print_meta: n_gqa = 1\n",
|
||||
"llm_load_print_meta: n_embd_k_gqa = 4096\n",
|
||||
"llm_load_print_meta: n_embd_v_gqa = 4096\n",
|
||||
"llm_load_print_meta: f_norm_eps = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_norm_rms_eps = 1.0e-06\n",
|
||||
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_logit_scale = 0.0e+00\n",
|
||||
"llm_load_print_meta: n_ff = 11008\n",
|
||||
"llm_load_print_meta: n_expert = 0\n",
|
||||
"llm_load_print_meta: n_expert_used = 0\n",
|
||||
"llm_load_print_meta: causal attn = 1\n",
|
||||
"llm_load_print_meta: pooling type = 0\n",
|
||||
"llm_load_print_meta: rope type = 0\n",
|
||||
"llm_load_print_meta: rope scaling = linear\n",
|
||||
"llm_load_print_meta: freq_base_train = 10000.0\n",
|
||||
"llm_load_print_meta: freq_scale_train = 1\n",
|
||||
"llm_load_print_meta: n_ctx_orig_yarn = 4096\n",
|
||||
"llm_load_print_meta: rope_finetuned = unknown\n",
|
||||
"llm_load_print_meta: ssm_d_conv = 0\n",
|
||||
"llm_load_print_meta: ssm_d_inner = 0\n",
|
||||
"llm_load_print_meta: ssm_d_state = 0\n",
|
||||
"llm_load_print_meta: ssm_dt_rank = 0\n",
|
||||
"llm_load_print_meta: model type = 7B\n",
|
||||
"llm_load_print_meta: model ftype = Q8_0\n",
|
||||
"llm_load_print_meta: model params = 6.74 B\n",
|
||||
"llm_load_print_meta: model size = 6.67 GiB (8.50 BPW) \n",
|
||||
"llm_load_print_meta: general.name = LLaMA v2\n",
|
||||
"llm_load_print_meta: BOS token = 1 '<s>'\n",
|
||||
"llm_load_print_meta: EOS token = 2 '</s>'\n",
|
||||
"llm_load_print_meta: UNK token = 0 '<unk>'\n",
|
||||
"llm_load_print_meta: LF token = 13 '<0x0A>'\n",
|
||||
"llm_load_print_meta: max token length = 48\n",
|
||||
"llm_load_tensors: ggml ctx size = 0.14 MiB\n",
|
||||
"llm_load_tensors: CPU buffer size = 6828.64 MiB\n",
|
||||
"...................................................................................................\n",
|
||||
"llama_new_context_with_model: n_ctx = 2048\n",
|
||||
"llama_new_context_with_model: n_batch = 512\n",
|
||||
"llama_new_context_with_model: n_ubatch = 512\n",
|
||||
"llama_new_context_with_model: flash_attn = 0\n",
|
||||
"llama_new_context_with_model: freq_base = 10000.0\n",
|
||||
"llama_new_context_with_model: freq_scale = 1\n",
|
||||
"llama_kv_cache_init: CPU KV buffer size = 1024.00 MiB\n",
|
||||
"llama_new_context_with_model: KV self size = 1024.00 MiB, K (f16): 512.00 MiB, V (f16): 512.00 MiB\n",
|
||||
"llama_new_context_with_model: CPU output buffer size = 0.12 MiB\n",
|
||||
"llama_new_context_with_model: CPU compute buffer size = 164.01 MiB\n",
|
||||
"llama_new_context_with_model: graph nodes = 1030\n",
|
||||
"llama_new_context_with_model: graph splits = 1\n",
|
||||
"AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 0 | \n",
|
||||
"Model metadata: {'tokenizer.ggml.unknown_token_id': '0', 'tokenizer.ggml.eos_token_id': '2', 'general.architecture': 'llama', 'llama.context_length': '4096', 'general.name': 'LLaMA v2', 'llama.embedding_length': '4096', 'llama.feed_forward_length': '11008', 'llama.attention.layer_norm_rms_epsilon': '0.000001', 'llama.rope.dimension_count': '128', 'llama.attention.head_count': '32', 'tokenizer.ggml.bos_token_id': '1', 'llama.block_count': '32', 'llama.attention.head_count_kv': '32', 'general.quantization_version': '2', 'tokenizer.ggml.model': 'llama', 'general.file_type': '7'}\n",
|
||||
"Using fallback chat format: llama-2\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=\"llama-2-7b-chat.Q8_0.gguf\",\n",
|
||||
" n_gpu_layers=-1,\n",
|
||||
" n_batch=512,\n",
|
||||
" n_ctx=2048,\n",
|
||||
" f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls\n",
|
||||
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "43e06f56-ef97-451b-87d9-8465ea442aed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now let's ask the same question to Llama model without showing them the earnings release.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "1033dd82-5532-437d-a548-27695e109589",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"?\n",
|
||||
"(NASDAQ:INTC)\n",
|
||||
"Intel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"llama_print_timings: load time = 131.20 ms\n",
|
||||
"llama_print_timings: sample time = 16.05 ms / 68 runs ( 0.24 ms per token, 4236.76 tokens per second)\n",
|
||||
"llama_print_timings: prompt eval time = 131.14 ms / 16 tokens ( 8.20 ms per token, 122.01 tokens per second)\n",
|
||||
"llama_print_timings: eval time = 3225.00 ms / 67 runs ( 48.13 ms per token, 20.78 tokens per second)\n",
|
||||
"llama_print_timings: total time = 3466.40 ms / 83 tokens\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"?\\n(NASDAQ:INTC)\\nIntel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.invoke(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "75f5cb10-746f-4e37-9386-b85a4d2b84ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**As you can see, model is giving wrong information. Correct asnwer is CCG revenue in Q1 2024 is $7.5B. Now let's apply RAG using the earning release document**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f4150ec-5692-4756-b11a-22feb7ab88ff",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**in RAG, we modify the input prompt by adding relevent documents with the question. Here, we use one of the popular RAG prompt**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "226c14b0-f43e-4a1f-a1e4-04731d467ec4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: {question} \\nContext: {context} \\nAnswer:\"))]"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"rag_prompt = hub.pull(\"rlm/rag-prompt\")\n",
|
||||
"rag_prompt.messages"
|
||||
]
|
||||
},
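{
"cell_type": "markdown",
"id": "prompt-preview-md",
"metadata": {},
"source": [
"**To see how the prompt gets modified, render it with an illustrative context string (placeholder text below, not retrieved content)**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "prompt-preview",
"metadata": {},
"outputs": [],
"source": [
"# Fill the prompt template with an example context/question pair and print\n",
"# the message that would be sent to the LLM.\n",
"preview = rag_prompt.invoke(\n",
"    {\"context\": \"CCG revenue was $7.5 billion, up 31% YoY.\", \"question\": question}\n",
")\n",
"print(preview.to_messages()[0].content)"
]
},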
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77deb6a0-0950-450a-916a-f2a029676c20",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Appending all retreived documents in a single document**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "2dbc3327-6ef3-4c1f-8797-0c71964b0921",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)"
|
||||
]
|
||||
},
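{
"cell_type": "markdown",
"id": "format-docs-preview-md",
"metadata": {},
"source": [
"**For example, joining the previously retrieved `docs` yields the single context string the prompt will receive (a minimal sketch; first 300 characters shown)**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "format-docs-preview",
"metadata": {},
"outputs": [],
"source": [
"# Join the retrieved documents into one context string and preview it.\n",
"context_text = format_docs(docs)\n",
"print(context_text[:300])"
]
},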
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e2d9f18-49d0-43a3-bea8-78746ffa86b7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**The last step is to create a chain using langchain tool that will create an e2e pipeline. It will take question and context as an input.**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "427379c2-51ff-4e0f-8278-a45221363299",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough, RunnablePick\n",
|
||||
"\n",
|
||||
"# Chain\n",
|
||||
"chain = (\n",
|
||||
" RunnablePassthrough.assign(context=RunnablePick(\"context\") | format_docs)\n",
|
||||
" | rag_prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "095d6280-c949-4d00-8e32-8895a82d245f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Llama.generate: prefix-match hit\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"llama_print_timings: load time = 131.20 ms\n",
|
||||
"llama_print_timings: sample time = 7.74 ms / 31 runs ( 0.25 ms per token, 4004.13 tokens per second)\n",
|
||||
"llama_print_timings: prompt eval time = 2529.41 ms / 674 tokens ( 3.75 ms per token, 266.46 tokens per second)\n",
|
||||
"llama_print_timings: eval time = 1542.94 ms / 30 runs ( 51.43 ms per token, 19.44 tokens per second)\n",
|
||||
"llama_print_timings: total time = 4123.68 ms / 704 tokens\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"context\": docs, \"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "638364b2-6bd2-4471-9961-d3a1d1b9d4ee",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now we see the results are correct as it is mentioned in earnings release.** <br>\n",
|
||||
"**To further automate, we will create a chain that will take input as question and retriever so that we don't need to retrieve documents seperately**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "4654e5b7-635f-4767-8b31-4c430164cdd5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"qa_chain = (\n",
|
||||
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
|
||||
" | rag_prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0979f393-fd0a-4e82-b844-68371c6ad68f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Now we only need to pass the question to the chain and it will fetch the contexts directly from the vector database to generate the answer**\n",
|
||||
"<br>\n",
|
||||
"**Let's try with another question**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "3ea07b82-e6ec-4084-85f4-191373530172",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Llama.generate: prefix-match hit\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"llama_print_timings: load time = 131.20 ms\n",
|
||||
"llama_print_timings: sample time = 6.28 ms / 31 runs ( 0.20 ms per token, 4937.88 tokens per second)\n",
|
||||
"llama_print_timings: prompt eval time = 2681.93 ms / 730 tokens ( 3.67 ms per token, 272.19 tokens per second)\n",
|
||||
"llama_print_timings: eval time = 1471.07 ms / 30 runs ( 49.04 ms per token, 20.39 tokens per second)\n",
|
||||
"llama_print_timings: total time = 4206.77 ms / 760 tokens\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa_chain.invoke(\"what is Intel DCAI revenue in Q1 2024?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9407f2a0-4a35-4315-8e96-02fcb80f210c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "rag-on-intel",
|
||||
"language": "python",
|
||||
"name": "rag-on-intel"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -13,7 +13,7 @@ OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
|
||||
|
||||
PYTHON = .venv/bin/python
|
||||
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm" | tr '\n' ' ')
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm|couchbase" | tr '\n' ' ')
|
||||
|
||||
PORT ?= 3001
|
||||
|
||||
@@ -38,6 +38,8 @@ generate-files:
|
||||
|
||||
$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
|
||||
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O $(INTERMEDIATE_DIR)/langserve.md
|
||||
@@ -59,7 +61,7 @@ render:
|
||||
$(PYTHON) scripts/notebook_convert.py $(INTERMEDIATE_DIR) $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
md-sync:
|
||||
rsync -avm --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
|
||||
rsync -avm --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --include="*/_category_.yml" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
generate-references:
|
||||
$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
@@ -178,3 +178,10 @@ autosummary_generate = True
|
||||
|
||||
html_copy_source = False
|
||||
html_show_sourcelink = False
|
||||
|
||||
# Set canonical URL from the Read the Docs Domain
|
||||
html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
|
||||
|
||||
# Tell Jinja2 templates the build is running on Read the Docs
|
||||
if os.environ.get("READTHEDOCS", "") == "True":
|
||||
html_context["READTHEDOCS"] = True
|
||||
|
||||
@@ -10,12 +10,21 @@ from pathlib import Path
|
||||
from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union
|
||||
|
||||
import toml
|
||||
import typing_extensions
|
||||
from langchain_core.runnables import Runnable, RunnableSerializable
|
||||
from pydantic import BaseModel
|
||||
|
||||
ROOT_DIR = Path(__file__).parents[2].absolute()
|
||||
HERE = Path(__file__).parent
|
||||
|
||||
ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]
|
||||
ClassKind = Literal[
|
||||
"TypedDict",
|
||||
"Regular",
|
||||
"Pydantic",
|
||||
"enum",
|
||||
"RunnablePydantic",
|
||||
"RunnableNonPydantic",
|
||||
]
|
||||
|
||||
|
||||
class ClassInfo(TypedDict):
|
||||
@@ -69,8 +78,36 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
|
||||
continue
|
||||
|
||||
if inspect.isclass(type_):
|
||||
if type(type_) == typing._TypedDictMeta: # type: ignore
|
||||
# The type of the class is used to select a template
|
||||
# for the object when rendering the documentation.
|
||||
# See `templates` directory for defined templates.
|
||||
# This is a hacky solution to distinguish between different
|
||||
# kinds of things that we want to render.
|
||||
if type(type_) is typing_extensions._TypedDictMeta: # type: ignore
|
||||
kind: ClassKind = "TypedDict"
|
||||
elif type(type_) is typing._TypedDictMeta: # type: ignore
|
||||
kind: ClassKind = "TypedDict"
|
||||
elif (
|
||||
issubclass(type_, Runnable)
|
||||
and issubclass(type_, BaseModel)
|
||||
and type_ is not Runnable
|
||||
):
|
||||
# RunnableSerializable subclasses from Pydantic,
# for which we use autodoc_pydantic for rendering.
|
||||
# We need to distinguish these from regular Pydantic
|
||||
# classes so we can hide inherited Runnable methods
|
||||
# and provide a link to the Runnable interface from
|
||||
# the template.
|
||||
kind = "RunnablePydantic"
|
||||
elif (
|
||||
issubclass(type_, Runnable)
|
||||
and not issubclass(type_, BaseModel)
|
||||
and type_ is not Runnable
|
||||
):
|
||||
# These are not pydantic classes but are Runnable.
|
||||
# We'll hide all the inherited methods from Runnable
|
||||
# but use a regular class template to render.
|
||||
kind = "RunnableNonPydantic"
|
||||
elif issubclass(type_, Enum):
|
||||
kind = "enum"
|
||||
elif issubclass(type_, BaseModel):
|
||||
@@ -251,6 +288,10 @@ Classes
|
||||
template = "enum.rst"
|
||||
elif class_["kind"] == "Pydantic":
|
||||
template = "pydantic.rst"
|
||||
elif class_["kind"] == "RunnablePydantic":
|
||||
template = "runnable_pydantic.rst"
|
||||
elif class_["kind"] == "RunnableNonPydantic":
|
||||
template = "runnable_non_pydantic.rst"
|
||||
else:
|
||||
template = "class.rst"
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -33,4 +33,4 @@
|
||||
{% endblock %}
|
||||
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
:member-order: groupwise
|
||||
:show-inheritance: True
|
||||
:special-members: __call__
|
||||
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
|
||||
|
||||
|
||||
{% block attributes %}
|
||||
{% endblock %}
|
||||
|
||||
40
docs/api_reference/templates/runnable_non_pydantic.rst
Normal file
@@ -0,0 +1,40 @@
|
||||
:mod:`{{module}}`.{{objname}}
|
||||
{{ underline }}==============
|
||||
|
||||
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
|
||||
|
||||
The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.
|
||||
|
||||
.. currentmodule:: {{ module }}
|
||||
|
||||
.. autoclass:: {{ objname }}
|
||||
|
||||
{% block attributes %}
|
||||
{% if attributes %}
|
||||
.. rubric:: {{ _('Attributes') }}
|
||||
|
||||
.. autosummary::
|
||||
{% for item in attributes %}
|
||||
~{{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
{% endif %}
|
||||
{% endblock %}
|
||||
|
||||
{% block methods %}
|
||||
{% if methods %}
|
||||
.. rubric:: {{ _('Methods') }}
|
||||
|
||||
.. autosummary::
|
||||
{% for item in methods %}
|
||||
~{{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
|
||||
{% for item in methods %}
|
||||
.. automethod:: {{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
|
||||
{% endif %}
|
||||
{% endblock %}
|
||||
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
24
docs/api_reference/templates/runnable_pydantic.rst
Normal file
@@ -0,0 +1,24 @@
|
||||
:mod:`{{module}}`.{{objname}}
|
||||
{{ underline }}==============
|
||||
|
||||
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
|
||||
|
||||
The :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>` has additional methods that are available on runnables, such as :py:meth:`with_types <langchain_core.runnables.base.Runnable.with_types>`, :py:meth:`with_retry <langchain_core.runnables.base.Runnable.with_retry>`, :py:meth:`assign <langchain_core.runnables.base.Runnable.assign>`, :py:meth:`bind <langchain_core.runnables.base.Runnable.bind>`, :py:meth:`get_graph <langchain_core.runnables.base.Runnable.get_graph>`, and more.
|
||||
|
||||
.. currentmodule:: {{ module }}
|
||||
|
||||
.. autopydantic_model:: {{ objname }}
|
||||
:model-show-json: False
|
||||
:model-show-config-summary: False
|
||||
:model-show-validator-members: False
|
||||
:model-show-field-summary: False
|
||||
:field-signature-prefix: param
|
||||
:members:
|
||||
:undoc-members:
|
||||
:inherited-members:
|
||||
:member-order: groupwise
|
||||
:show-inheritance: True
|
||||
:special-members: __call__
|
||||
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
@@ -2,132 +2,129 @@
|
||||
{%- set url_root = pathto('', 1) %}
|
||||
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
|
||||
{%- if not embedded and docstitle %}
|
||||
{%- set titlesuffix = " — "|safe + docstitle|e %}
{%- else %}
|
||||
{%- set titlesuffix = "" %}
{%- endif %}
|
||||
{%- set lang_attr = 'en' %}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
|
||||
<!--[if gt IE 8]><!--> <html class="no-js" lang="{{ lang_attr }}" > <!--<![endif]-->
|
||||
<!--[if gt IE 8]><!-->
|
||||
<html class="no-js" lang="{{ lang_attr }}"> <!--<![endif]-->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
{{ metatags }}
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
|
||||
{% block htmltitle %}
|
||||
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
|
||||
{% endblock %}
|
||||
<link rel="canonical"
|
||||
href="https://api.python.langchain.com/en/latest/{{ pagename }}.html"/>
|
||||
|
||||
{% if favicon_url %}
|
||||
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
|
||||
{% endif %}
|
||||
|
||||
<link rel="stylesheet" href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}" type="text/css" />
|
||||
{%- for css in css_files %}
|
||||
{%- if css|attr("rel") %}
|
||||
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
|
||||
{%- else %}
|
||||
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
|
||||
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
|
||||
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
|
||||
{%- block extrahead %} {% endblock %}
|
||||
<link rel="stylesheet"
|
||||
href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}"
|
||||
type="text/css"/>
|
||||
{%- for css in css_files %}
|
||||
{%- if css|attr("rel") %}
|
||||
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}"
|
||||
type="text/css"{% if css.title is not none %}
|
||||
title="{{ css.title }}"{% endif %} />
|
||||
{%- else %}
|
||||
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css"/>
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css"/>
|
||||
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}"
|
||||
src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
|
||||
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
|
||||
{%- block extrahead %} {% endblock %}
|
||||
</head>
|
||||
<body>
|
||||
{% include "nav.html" %}
|
||||
{%- block content %}
|
||||
<div class="d-flex" id="sk-doc-wrapper">
|
||||
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
|
||||
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary" for="sk-toggle-checkbox">Toggle Menu</label>
|
||||
<div id="sk-sidebar-wrapper" class="border-right">
|
||||
<div class="sk-sidebar-toc-wrapper">
|
||||
<div class="btn-group w-100 mb-2" role="group" aria-label="rellinks">
|
||||
{%- if prev %}
|
||||
<a href="{{ prev.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ prev.title|striptags }}">Prev</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Prev</a>
|
||||
{%- endif %}
|
||||
{%- if parents -%}
|
||||
<a href="{{ parents[-1].link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ parents[-1].title|striptags }}">Up</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink disabled py-1">Up</a>
|
||||
{%- endif %}
|
||||
{%- if next %}
|
||||
<a href="{{ next.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ next.title|striptags }}">Next</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Next</a>
|
||||
{%- endif %}
|
||||
<div class="d-flex" id="sk-doc-wrapper">
|
||||
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
|
||||
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary"
|
||||
for="sk-toggle-checkbox">Toggle Menu</label>
|
||||
<div id="sk-sidebar-wrapper" class="border-right">
|
||||
<div class="sk-sidebar-toc-wrapper">
|
||||
{%- if meta and meta['parenttoc']|tobool %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
|
||||
<ul>
|
||||
{% for main_nav_item in nav %}
|
||||
{% if main_nav_item.active %}
|
||||
<li>
|
||||
<a href="{{ main_nav_item.url }}"
|
||||
class="sk-toc-active">{{ main_nav_item.title }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
{% for nav_item in main_nav_item.children %}
|
||||
<li>
|
||||
<a href="{{ nav_item.url }}"
|
||||
class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
|
||||
{% if nav_item.children %}
|
||||
<ul>
|
||||
{% for inner_child in nav_item.children %}
|
||||
<li class="sk-toctree-l3">
|
||||
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
{%- elif meta and meta['globalsidebartoc']|tobool %}
|
||||
<div class="sk-sidebar-toc sk-sidebar-global-toc">
|
||||
{{ toctree(maxdepth=2, titles_only=True) }}
|
||||
</div>
|
||||
{%- else %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{{ toc }}
|
||||
</div>
|
||||
{%- endif %}
|
||||
</div>
|
||||
</div>
|
||||
{%- if meta and meta['parenttoc']|tobool %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
|
||||
<ul>
|
||||
{% for main_nav_item in nav %}
|
||||
{% if main_nav_item.active %}
|
||||
<li>
|
||||
<a href="{{ main_nav_item.url }}" class="sk-toc-active">{{ main_nav_item.title }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
{% for nav_item in main_nav_item.children %}
|
||||
<li>
|
||||
<a href="{{ nav_item.url }}" class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
|
||||
{% if nav_item.children %}
|
||||
<ul>
|
||||
{% for inner_child in nav_item.children %}
|
||||
<li class="sk-toctree-l3">
|
||||
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
<div id="sk-page-content-wrapper">
|
||||
<div class="sk-page-content container-fluid body px-md-3" role="main">
|
||||
{% block body %}{% endblock %}
|
||||
</div>
|
||||
{%- elif meta and meta['globalsidebartoc']|tobool %}
|
||||
<div class="sk-sidebar-toc sk-sidebar-global-toc">
|
||||
{{ toctree(maxdepth=2, titles_only=True) }}
|
||||
<div class="container">
|
||||
<footer class="sk-content-footer">
|
||||
{%- if pagename != 'index' %}
|
||||
{%- if show_copyright %}
|
||||
{%- if hasdoc('copyright') %}
|
||||
{% trans path=pathto('copyright'), copyright=copyright|e %}
|
||||
© {{ copyright }}.{% endtrans %}
|
||||
{%- else %}
|
||||
{% trans copyright=copyright|e %}© {{ copyright }}
|
||||
.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if last_updated %}
|
||||
{% trans last_updated=last_updated|e %}Last updated
|
||||
on {{ last_updated }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- if show_source and has_source and sourcename %}
|
||||
<a href="{{ pathto('_sources/' + sourcename, true)|e }}"
|
||||
rel="nofollow">{{ _('Show this page source') }}</a>
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
</footer>
|
||||
</div>
|
||||
{%- else %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{{ toc }}
|
||||
</div>
|
||||
{%- endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="sk-page-content-wrapper">
|
||||
<div class="sk-page-content container-fluid body px-md-3" role="main">
|
||||
{% block body %}{% endblock %}
|
||||
</div>
|
||||
<div class="container">
|
||||
<footer class="sk-content-footer">
|
||||
{%- if pagename != 'index' %}
|
||||
{%- if show_copyright %}
|
||||
{%- if hasdoc('copyright') %}
|
||||
{% trans path=pathto('copyright'), copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
|
||||
{%- else %}
|
||||
{% trans copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if last_updated %}
|
||||
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- if show_source and has_source and sourcename %}
|
||||
<a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source') }}</a>
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{%- endblock %}
|
||||
<script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
|
||||
{% include "javascript.html" %}
|
||||
|
||||
1248
docs/data/people.yml
File diff suppressed because it is too large
@@ -24,21 +24,22 @@ Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype
|
||||
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
|
||||
| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
|
||||
| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
|
||||
| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
|
||||
| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
|
||||
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
|
||||
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
|
||||
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
|
||||
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
## Self-Discover: Large Language Models Self-Compose Reasoning Structures

@@ -418,7 +419,7 @@ publicly available.
- **URL:** http://arxiv.org/abs/2304.03442v2
- **LangChain:**

- **Cookbook:** [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
- **Cookbook:** [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)

**Abstract:** Believable proxies of human behavior can empower interactive applications
ranging from immersive environments to rehearsal spaces for interpersonal

@@ -540,7 +541,7 @@ more than 1/1,000th the compute of GPT-4.
- **URL:** http://arxiv.org/abs/2301.10226v4
- **LangChain:**

- **API Reference:** [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)

**Abstract:** Potential harms of large language models can be mitigated by watermarking
model output, i.e., embedding signals into generated text that are invisible to

@@ -683,6 +684,41 @@ accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B
which uses chain-of-thought by absolute 15% top-1. Our code and data are
publicly available at http://reasonwithpal.com/ .
## ReAct: Synergizing Reasoning and Acting in Language Models

- **arXiv id:** 2210.03629v3
- **Title:** ReAct: Synergizing Reasoning and Acting in Language Models
- **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.
- **Published Date:** 2022-10-06
- **URL:** http://arxiv.org/abs/2210.03629v3
- **LangChain:**

- **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)
- **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)

**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities
across tasks in language understanding and interactive decision making, their
abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g.
action plan generation) have primarily been studied as separate topics. In this
paper, we explore the use of LLMs to generate both reasoning traces and
task-specific actions in an interleaved manner, allowing for greater synergy
between the two: reasoning traces help the model induce, track, and update
action plans as well as handle exceptions, while actions allow it to interface
with external sources, such as knowledge bases or environments, to gather
additional information. We apply our approach, named ReAct, to a diverse set of
language and decision making tasks and demonstrate its effectiveness over
state-of-the-art baselines, as well as improved human interpretability and
trustworthiness over methods without reasoning or acting components.
Concretely, on question answering (HotpotQA) and fact verification (Fever),
ReAct overcomes issues of hallucination and error propagation prevalent in
chain-of-thought reasoning by interacting with a simple Wikipedia API, and
generates human-like task-solving trajectories that are more interpretable than
baselines without reasoning traces. On two interactive decision making
benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and
reinforcement learning methods by an absolute success rate of 34% and 10%
respectively, while being prompted with only one or two in-context examples.
Project site with code: https://react-lm.github.io

## Deep Lake: a Lakehouse for Deep Learning

- **arXiv id:** 2209.10785v2
@@ -768,7 +804,7 @@ few-shot examples.
- **URL:** http://arxiv.org/abs/2202.00666v5
- **LangChain:**

- **API Reference:** [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)

**Abstract:** Today's probabilistic language generators fall short when it comes to
producing coherent and fluent text despite the fact that the underlying models

@@ -832,7 +868,7 @@ https://github.com/OpenAI/CLIP.
- **URL:** http://arxiv.org/abs/1909.05858v2
- **LangChain:**

- **API Reference:** [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint)
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)

**Abstract:** Large-scale language models show promising text generation capabilities, but
users cannot easily control particular aspects of the generated text. We
@@ -11,6 +11,7 @@
### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)
### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)

## Courses

@@ -45,7 +46,6 @@
- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) by **James Briggs** and **Francisco Ingham**
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
- [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)

---------------------
@@ -11,7 +11,7 @@ LangChain as a framework consists of a number of packages.

### `langchain-core`
This package contains base abstractions of different components and ways to compose them together.
The interfaces for core components like LLMs, vectorstores, retrievers and more are defined here.
The interfaces for core components like LLMs, vector stores, retrievers and more are defined here.
No third party integrations are defined here.
The dependencies are kept purposefully very lightweight.

@@ -30,7 +30,7 @@ All chains, agents, and retrieval strategies here are NOT specific to any one in

This package contains third party integrations that are maintained by the LangChain community.
Key partner packages are separated out (see below).
This contains all integrations for various components (LLMs, vectorstores, retrievers).
This contains all integrations for various components (LLMs, vector stores, retrievers).
All dependencies in this package are optional to keep the package as lightweight as possible.

### [`langgraph`](https://langchain-ai.github.io/langgraph)
@@ -51,10 +51,11 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic

<ThemedImage
    alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
    sources={{
        light: useBaseUrl('/svg/langchain_stack.svg'),
        dark: useBaseUrl('/svg/langchain_stack_dark.svg'),
        light: useBaseUrl('/svg/langchain_stack_062024.svg'),
        dark: useBaseUrl('/svg/langchain_stack_062024_dark.svg'),
    }}
    title="LangChain Framework Overview"
    style={{ width: "100%" }}
/>

## LangChain Expression Language (LCEL)
@@ -85,11 +86,16 @@ Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas i
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability.

[**Seamless LangServe deployment**](/docs/langserve)
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
LCEL aims to provide consistency around behavior and customization over legacy subclassed chains such as `LLMChain` and
`ConversationalRetrievalChain`. Many of these legacy chains hide important details like prompts, and as a wider variety
of viable models emerge, customization has become more and more important.

If you are currently using one of these legacy chains, please see [this guide on how to migrate](/docs/how_to/migrate_chains/).

For guides on how to do specific tasks with LCEL, check out [the relevant how-to guides](/docs/how_to/#langchain-expression-language-lcel).

### Runnable interface
<span data-heading-keywords="invoke"></span>
<span data-heading-keywords="invoke,runnable"></span>

To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
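As a minimal sketch of the shared interface (using `RunnableLambda`, a core primitive, purely for illustration):

```python
from langchain_core.runnables import RunnableLambda

# Any Runnable exposes the same methods: invoke, batch, and stream.
doubler = RunnableLambda(lambda x: x * 2)

doubler.invoke(2)         # -> 4
doubler.batch([1, 2, 3])  # -> [2, 4, 6]
```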
@@ -144,8 +150,19 @@ LangChain does not host any Chat Models, rather we rely on third party integrati

We have some standardized parameters when constructing ChatModels:
- `model`: the name of the model
- `temperature`: the sampling temperature
- `timeout`: request timeout
- `max_tokens`: max tokens to generate
- `stop`: default stop sequences
- `max_retries`: max number of times to retry requests
- `api_key`: API key for the model provider
- `base_url`: endpoint to send requests to

ChatModels also accept other parameters that are specific to that integration.
Some important things to note:
- standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so `max_tokens` can't be supported on these.
- standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.); they're not enforced on models in `langchain-community`.

ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel, head to the API reference for that model.
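For example, using the `langchain-openai` package (the model name and values here are illustrative, not a recommendation):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4o",   # illustrative model name
    temperature=0,
    max_tokens=256,
    timeout=30,
    max_retries=2,
    # api_key and base_url can also be passed explicitly; by default the
    # API key is read from the OPENAI_API_KEY environment variable.
)
```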
:::important
**Tool Calling** Some chat models have been fine-tuned for tool calling and provide a dedicated API for tool calling.

@@ -168,8 +185,15 @@ For a full list of LangChain model providers with multimodal models, [check out
### LLMs
<span data-heading-keywords="llm,llms"></span>

:::caution
Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),
even for non-chat use cases.

You are probably looking for [the section above instead](/docs/concepts/#chat-models).
:::

Language models that take a string as input and return a string.
These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see below).
These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).

Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
@@ -212,7 +236,7 @@ This is where information like log-probs and token usage may be stored.

These represent a decision from a language model to call a tool. They are included as part of an `AIMessage` output.
They can be accessed from there with the `.tool_calls` property.

This property returns a list of dictionaries. Each dictionary has the following keys:
This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with the following arguments:

- `name`: The name of the tool that should be called.
- `args`: The arguments to that tool.

@@ -222,13 +246,18 @@ This property returns a list of dictionaries. Each dictionary has the following

This represents a system message, which tells the model how to behave. Not every model provider supports this.

#### FunctionMessage

This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.

#### ToolMessage

This represents the result of a tool call. This is distinct from a FunctionMessage in order to match OpenAI's `function` and `tool` message types. In addition to `role` and `content`, this message has a `tool_call_id` parameter which conveys the id of the call to the tool that was called to produce this result.
This represents the result of a tool call. In addition to `role` and `content`, this message has:

- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
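As a rough sketch, constructing one of these by hand might look like the following (all values are placeholders):

```python
from langchain_core.messages import ToolMessage

tool_message = ToolMessage(
    content="42",                      # sent back to the model
    tool_call_id="call_abc123",        # id of the originating tool call (placeholder)
    artifact={"raw_rows": [1, 2, 3]},  # kept around, but not sent to the model
)
```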
#### (Legacy) FunctionMessage

This is a legacy message type, corresponding to OpenAI's legacy function-calling API. ToolMessage should be used instead to correspond to the updated tool-calling API.

This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.
### Prompt templates
@@ -425,9 +454,14 @@ For specifics on how to use text splitters, see the [relevant how-to guides here

### Embedding models
<span data-heading-keywords="embedding,embeddings"></span>

The Embeddings class is a class designed for interfacing with text embedding models. There are lots of embedding model providers (OpenAI, Cohere, Hugging Face, etc) - this class is designed to provide a standard interface for all of them.
Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text.
By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning.
These natural language search capabilities underpin many types of [context retrieval](/docs/concepts/#retrieval),
where we provide an LLM with the relevant data it needs to effectively respond to a query.

Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.

![Embeddings](/img/embeddings.png)

The `Embeddings` class is a class designed for interfacing with text embedding models. There are many different embedding model providers (OpenAI, Cohere, Hugging Face, etc) and local models, and this class is designed to provide a standard interface for all of them.

The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
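A minimal sketch of the two methods, assuming the `langchain-openai` package for the concrete model:

```python
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()

# Embed multiple documents (to be searched over): returns one vector per text.
doc_vectors = embeddings.embed_documents(["hello world", "goodbye world"])

# Embed a single query (the search input): returns a single vector.
query_vector = embeddings.embed_query("hello")
```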
@@ -440,6 +474,9 @@ One of the most common ways to store and search over unstructured data is to emb
and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query.
A vector store takes care of storing embedded data and performing vector search for you.

Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before
similarity search, allowing you more control over returned documents.

Vector stores can be converted to the retriever interface by doing:

```python
vectorstore = MyVectorStore()
retriever = vectorstore.as_retriever()
```
@@ -455,7 +492,7 @@ For specifics on how to use vector stores, see the [relevant how-to guides here]

A retriever is an interface that returns documents given an unstructured query.
It is more general than a vector store.
A retriever does not need to be able to store documents, only to return (or retrieve) them.
Retrievers can be created from vectorstores, but are also broad enough to include [Wikipedia search](/docs/integrations/retrievers/wikipedia/) and [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/).
Retrievers can be created from vector stores, but are also broad enough to include [Wikipedia search](/docs/integrations/retrievers/wikipedia/) and [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/).

Retrievers accept a string query as input and return a list of Documents as output.
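Because retrievers are runnables, using one is a single call (assuming `retriever` was created as above):

```python
# Returns a list of Document objects relevant to the query.
docs = retriever.invoke("What did the president say about technology?")
for doc in docs:
    print(doc.page_content)
```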
@@ -464,33 +501,87 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d

### Tools
<span data-heading-keywords="tool,tools"></span>

Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models.
Tools are needed whenever you want a model to control parts of your code or call out to external APIs.

A tool consists of the following components:
A tool consists of:

1. The name of the tool
2. A description of what the tool does
3. JSON schema of what the inputs to the tool are
4. The function to call
5. Whether the result of a tool should be returned directly to the user (only relevant for agents)

1. The name of the tool.
2. A description of what the tool does.
3. A JSON schema defining the inputs to the tool.
4. A function (and, optionally, an async variant of the function).

The name, description and JSON schema are provided as context
to the LLM, allowing the LLM to determine how to use the tool
appropriately.
When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
Typical usage may look like the following:

Given a list of available tools and a prompt, an LLM can request
that one or more tools be invoked with appropriate arguments.

```python
tools = [...]  # Define a list of tools
llm_with_tools = llm.bind_tools(tools)
ai_msg = llm_with_tools.invoke("do xyz...")  # AIMessage(tool_calls=[ToolCall(...), ...], ...)
```

Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:
The `AIMessage` returned from the model MAY have `tool_calls` associated with it.
Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like.
- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
- Simpler tools are generally easier for models to use than more complex tools.

Once the chosen tools are invoked, the results can be passed back to the model so that it can complete whatever task
it's performing.
There are generally two different ways to invoke the tool and pass back the response:

For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).

#### Invoke with just the arguments

When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string).
This generally looks like:

```python
# First check that the LLM actually returned tool calls.
tool_call = ai_msg.tool_calls[0]  # ToolCall(args={...}, id=..., ...)
tool_output = tool.invoke(tool_call["args"])
tool_message = ToolMessage(content=tool_output, tool_call_id=tool_call["id"], name=tool_call["name"])
```
Note that the `content` field will generally be passed back to the model.
If you do not want the raw tool response to be passed to the model, but you still want to keep it around,
you can transform the tool output but also pass it as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage)):

```python
...  # Same code as above
response_for_llm = transform(tool_output)  # transform is your own post-processing function
tool_message = ToolMessage(content=response_for_llm, tool_call_id=tool_call["id"], name=tool_call["name"], artifact=tool_output)
```

#### Invoke with `ToolCall`

The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model.
When you do this, the tool will return a `ToolMessage`.
The benefit of this is that you don't have to write the logic yourself to transform the tool output into a `ToolMessage`.
This generally looks like:

```python
tool_call = ai_msg.tool_calls[0]  # ToolCall(args={...}, id=..., ...)
tool_message = tool.invoke(tool_call)
# -> ToolMessage(content="tool result foobar...", tool_call_id=..., name="tool_name")
```

If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the `ToolMessage`, you will need to have the tool return two things.
Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/).
#### Best practices

When designing tools to be used by a model, it is important to keep in mind that:

- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
- Simple, narrowly scoped tools are easier for models to use than complex tools.

#### Related

For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).

To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).

### Toolkits
<span data-heading-keywords="toolkit,toolkits"></span>

Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
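As a sketch, toolkits generally expose a `get_tools` method that returns the bundled tools (`ExampleToolkit` is a hypothetical placeholder):

```python
# Initialize a toolkit (ExampleToolkit is a hypothetical placeholder).
toolkit = ExampleToolkit(...)

# Get the list of tools it bundles together.
tools = toolkit.get_tools()
```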
@@ -524,6 +615,28 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how
It is recommended, however, that you start to transition to LangGraph.
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).

#### ReAct agents
<span data-heading-keywords="react,react agent"></span>

One popular architecture for building agents is [**ReAct**](https://arxiv.org/abs/2210.03629).
ReAct combines reasoning and acting in an iterative process - in fact the name "ReAct" stands for "Reason" and "Act".

The general flow looks like this:

- The model will "think" about what step to take in response to an input and any previous observations.
- The model will then choose an action from available tools (or choose to respond to the user).
- The model will generate arguments to that tool.
- The agent runtime (executor) will parse out the chosen tool and call it with the generated arguments.
- The executor will return the results of the tool call back to the model as an observation.
- This process repeats until the agent chooses to respond.

There are general prompting-based implementations that do not require any model-specific features, but the most
reliable implementations use features like [tool calling](/docs/how_to/tool_calling/) to reliably format outputs
and reduce variance.

Please see the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for more information,
or [this how-to guide](/docs/how_to/migrate_agent/) for specific information on migrating to LangGraph.
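As a hedged sketch, LangGraph ships a prebuilt helper for this architecture; the tool and inputs below are illustrative:

```python
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# `model` is assumed to be a chat model instance that supports tool calling.
agent = create_react_agent(model, [multiply])

# The agent loops between model and tools until it decides to respond.
result = agent.invoke({"messages": [("human", "What is 6 times 7?")]})
```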
### Callbacks

LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.

@@ -599,6 +712,7 @@ For specifics on how to use callbacks, see the [relevant how-to guides here](/do

## Techniques

### Streaming
<span data-heading-keywords="stream,streaming"></span>

Individual LLM calls often run for much longer than traditional resource requests.
This compounds when you build more complex chains or agents that require multiple reasoning steps.

@@ -609,49 +723,9 @@ around building apps with LLMs to help alleviate latency issues, and LangChain a

Below, we'll discuss some concepts and considerations around streaming in LangChain.
#### `.stream()` and `.astream()`

LangChain also includes the `.stream()` method (and the equivalent `.astream()` method for [async](https://docs.python.org/3/library/asyncio.html) environments) as a more ergonomic streaming interface.
Most modules in LangChain include the `.stream()` method (and the equivalent `.astream()` method for [async](https://docs.python.org/3/library/asyncio.html) environments) as an ergonomic streaming interface.
`.stream()` returns an iterator, which you can consume with a simple `for` loop. Here's an example with a chat model:

```python
@@ -664,7 +738,7 @@ for chunk in model.stream("what color is the sky?"):
```

For models (or other components) that don't support streaming natively, this iterator would just yield a single chunk, but
you could still use the same general pattern. Using `.stream()` will also automatically call the model in streaming mode
you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode
without the need to provide additional config.

The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html).
@@ -675,14 +749,15 @@ each yielded chunk.
You can check out [this guide](/docs/how_to/streaming/#using-stream) for more detail on how to use `.stream()`.
#### `.astream_events()`
<span data-heading-keywords="astream_events,stream_events,stream events"></span>

While the `.stream()` method is easier to use than callbacks, it only returns one type of value. This is fine for single LLM calls,
While the `.stream()` method is intuitive, it can only return the final generated value of your chain. This is fine for single LLM calls,
but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of
the chain alongside the final output - for example, returning sources alongside the final generation when building a chat
over documents app.

There are ways to do this using the aforementioned callbacks, or by constructing your chain in such a way that it passes intermediate
values to the end with something like [`.assign()`](/docs/how_to/passthrough/), but LangChain also includes an
There are ways to do this [using callbacks](/docs/concepts/#callbacks-1), or by constructing your chain in such a way that it passes intermediate
values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
`.astream_events()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator
which yields [various types of events](/docs/how_to/streaming/#event-reference) that you can filter and process according
to the needs of your project.

@@ -708,7 +783,48 @@ async for event in chain.astream_events({"topic": "parrot"}, version="v2"):

You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components!

See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.astream_events()`.
See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.astream_events()`,
including a table listing available events.
#### Callbacks

The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a
callback handler that handles the [`on_llm_new_token`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any
[LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls
the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. an HTTP response.
You can also handle the [`on_llm_end`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup.

You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks.
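For instance, a minimal handler that pipes tokens to stdout might look like this sketch:

```python
from langchain_core.callbacks import BaseCallbackHandler


class PrintTokenHandler(BaseCallbackHandler):
    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Called once per generated token; pipe it wherever you like.
        print(token, end="", flush=True)


# `model` is assumed to be a chat model; callbacks can be passed at invocation time:
# model.invoke("Tell me a joke", config={"callbacks": [PrintTokenHandler()]})
```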
Callbacks were the first technique for streaming introduced in LangChain. While powerful and generalizable,
they can be unwieldy for developers. For example:

- You need to explicitly initialize and manage some aggregator or other stream to collect results.
- The execution order isn't explicitly guaranteed, and you could theoretically have a callback run after the `.invoke()` method finishes.
- Providers would often make you pass an additional parameter to stream outputs instead of returning them all at once.
- You would often ignore the result of the actual model call in favor of callback results.
#### Tokens

The unit that most model providers use to measure input and output is a unit called a **token**.
Tokens are the basic units that language models read and generate when processing or producing text.
The exact definition of a token can vary depending on the specific way the model was trained -
for instance, in English, a token could be a single word like "apple", or a part of a word like "app".

When you send a model a prompt, the words and characters in the prompt are encoded into tokens using a **tokenizer**.
The model then streams back generated output tokens, which the tokenizer decodes into human-readable text.
The below example shows how OpenAI models tokenize `LangChain is cool!`:

![Tokenization example](/img/tokenization.png)

You can see that it gets split into 5 different tokens, and that the boundaries between tokens are not exactly the same as word boundaries.

The reason language models use tokens rather than something more immediately intuitive like "characters"
has to do with how they process and understand text. At a high-level, language models iteratively predict their next generated output based on
the initial input and their previous generations. Training the model using tokens allows language models to handle linguistic
units (like words or subwords) that carry meaning, rather than individual characters, which makes it easier for the model
to learn and understand the structure of the language, including grammar and context.
Furthermore, using tokens can also improve efficiency, since the model processes fewer units of text compared to character-level processing.
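As an illustrative aside, you can reproduce the tokenization above with OpenAI's `tiktoken` library (token counts can vary by model):

```python
import tiktoken

encoding = tiktoken.encoding_for_model("gpt-4")
tokens = encoding.encode("LangChain is cool!")
print(len(tokens))              # e.g. 5 tokens for this string
print(encoding.decode(tokens))  # "LangChain is cool!"
```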
### Structured output

@@ -725,14 +841,54 @@ a few ways to get structured output from models in LangChain.

#### `.with_structured_output()`

For convenience, some LangChain chat models support a `.with_structured_output()` method.
This method only requires a schema as input, and returns a dict or Pydantic object.
For convenience, some LangChain chat models support a [`.with_structured_output()`](/docs/how_to/structured_output/#the-with_structured_output-method)
method. This method only requires a schema as input, and returns a dict or Pydantic object.
Generally, this method is only present on models that support one of the more advanced methods described below,
and will use one of them under the hood. It takes care of importing a suitable output parser and
formatting the schema in the right format for the model.

Here's an example:

```python
from typing import Optional

from langchain_core.pydantic_v1 import BaseModel, Field


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")


structured_llm = llm.with_structured_output(Joke)

structured_llm.invoke("Tell me a joke about cats")
```

```
Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)
```
We recommend this method as a starting point when working with structured output:

- It uses other model-specific features under the hood, without the need to import an output parser.
- For the models that use tool calling, no special prompting is needed.
- If multiple underlying techniques are supported, you can supply a `method` parameter to
[toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs).

You may want or need to use other techniques if:

- The chat model you are using does not support tool calling.
- You are working with very complex schemas and the model is having trouble generating outputs that conform.

For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-with_structured_output-method).

You can also check out [this table](/docs/integrations/chat/#advanced-features) for a list of models that support
`with_structured_output()`.
#### Raw prompting

The most intuitive way to get a model to structure output is to ask nicely.

@@ -755,9 +911,8 @@ for smooth parsing can be surprisingly difficult and model-specific.
Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions,
and still others may prefer XML.

While we'll next go over some ways that you can take advantage of features offered by
model providers to increase reliability, prompting techniques remain important for tuning your
results no matter what method you choose.
While features offered by model providers may increase reliability, prompting techniques remain important for tuning your
results no matter which method you choose.

#### JSON mode
<span data-heading-keywords="json mode"></span>

@@ -767,10 +922,11 @@ Some models, such as [Mistral](/docs/integrations/chat/mistralai/), [OpenAI](/do
support a feature called **JSON mode**, usually enabled via config.

When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON.
Often they require some custom prompting, but it's usually much less burdensome and along the lines of,
`"you must always return JSON"`, and the [output is easier to parse](/docs/how_to/output_parser_json/).
Often they require some custom prompting, but it's usually much less burdensome than completely raw prompting and
more along the lines of, `"you must always return JSON"`. The [output is also generally easier to parse](/docs/how_to/output_parser_json/).

It's also generally simpler and more commonly available than tool calling.
It's also generally simpler to use directly and more commonly available than tool calling, and can give
more flexibility around prompting and shaping results.

Here's an example:
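One minimal sketch, assuming an OpenAI chat model where JSON mode is enabled via the provider's `response_format` parameter (the model name and prompt are illustrative):

```python
from langchain_openai import ChatOpenAI

# Bind the provider-specific JSON mode flag onto the model.
model = ChatOpenAI(model="gpt-4o").bind(response_format={"type": "json_object"})

ai_msg = model.invoke(
    "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
)
print(ai_msg.content)  # e.g. '{"random_ints": [23, 47, 89, ...]}'
```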
@@ -808,7 +964,7 @@ For a full list of model providers that support JSON mode, see [this table](/doc

We use the term tool calling interchangeably with function calling. Although
function calling is sometimes meant to refer to invocations of a single function,
we treat all models as though they can return multiple tool or function calls in
each message.
:::

Tool calling allows a model to respond to a given prompt by generating output that

@@ -846,36 +1002,168 @@ The standard interface consists of:

The following how-to guides are good practical resources for using function/tool calling:

- [How to return structured data from an LLM](/docs/how_to/structured_output/)
- [How to use a model to call tools](/docs/how_to/tool_calling/)
- [How to use a model to call tools](/docs/how_to/tool_calling)

For a full list of model providers that support tool calling, [see this table](/docs/integrations/chat/#advanced-features).
### Retrieval

LangChain provides several advanced retrieval types. A full list is below, along with the following information:
LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information. Fine-tuning an LLM with specific facts is one way to mitigate this, but is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise).
Retrieval is the process of providing relevant information to an LLM to improve its response for a given input. Retrieval augmented generation (RAG) is the process of grounding the LLM generation (output) using the retrieved information.

**Name**: Name of the retrieval algorithm.
:::tip

**Index Type**: Which index type (if any) this relies on.
* See our RAG from Scratch [code](https://github.com/langchain-ai/rag-from-scratch) and [video series](https://youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x&feature=shared).
* For a high-level guide on retrieval, see this [tutorial on RAG](/docs/tutorials/rag/).

**Uses an LLM**: Whether this retrieval method uses an LLM.
:::

**When to Use**: Our commentary on when you should consider using this retrieval method.
RAG is only as good as the retrieved documents’ relevance and quality. Fortunately, an emerging set of techniques can be employed to design and improve RAG systems. We've focused on taxonomizing and summarizing many of these techniques (see below figure) and will share some high-level strategic guidance in the following sections.
You can and should experiment with using different pieces together. You might also find [this LangSmith guide](https://docs.smith.langchain.com/how_to_guides/evaluation/evaluate_llm_application) useful for showing how to evaluate different iterations of your app.

**Description**: Description of what this retrieval algorithm is doing.
![RAG techniques landscape](/img/rag_landscape.png)
#### Query Translation

First, consider the user input(s) to your RAG system. Ideally, a RAG system can handle a wide range of inputs, from poorly worded questions to complex multi-part queries.
**Using an LLM to review and optionally modify the input is the central idea behind query translation.** This serves as a general buffer, optimizing raw user inputs for your retrieval system.
For example, this can be as simple as extracting keywords or as complex as generating multiple sub-questions for a complex query.

| Name | When to use | Description |
|---------------|-------------|-------------|
| [Multi-query](/docs/how_to/MultiQueryRetriever/) | When you need to cover multiple perspectives of a question. | Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and return the unique documents across all queries. |
| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can be solved either sequentially (using the answer from the first, plus retrieval, to answer the second) or in parallel (consolidating each answer into a final answer). |
| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. |
| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents, on the premise that doc-doc similarity search can produce more relevant matches. |
:::tip

See our RAG from Scratch videos for a few different specific approaches:
- [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared)
- [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared)
- [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared)
- [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared)

:::
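
As a concrete starting point, here is a minimal multi-query sketch. It assumes an OpenAI API key and a small in-memory Chroma vector store; the texts, model name, and question are illustrative placeholders.

```python
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Toy corpus; in practice this is your indexed document set.
vectorstore = Chroma.from_texts(
    ["Agents decompose complex tasks into smaller steps.", "LLMs can plan and reflect."],
    OpenAIEmbeddings(),
)

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # any chat model works

# The retriever asks the LLM to rewrite the question from several perspectives,
# runs one retrieval per rewrite, and returns the unique union of documents.
retriever = MultiQueryRetriever.from_llm(retriever=vectorstore.as_retriever(), llm=llm)
docs = retriever.invoke("How do agents break down complex tasks?")
```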
#### Routing

Second, consider the data sources available to your RAG system. You may want to query across more than one database, or across structured and unstructured data sources. **Using an LLM to review the input and route it to the appropriate data source is a simple and effective approach for querying across sources.**

| Name | When to use | Description |
|------------------|--------------------------------------------|-------------|
| [Logical routing](/docs/how_to/routing/) | When you can prompt an LLM with rules to decide where to route the input. | Logical routing can use an LLM to reason about the query and choose which datastore is most appropriate. |
| [Semantic routing](/docs/how_to/routing/#routing-by-semantic-similarity) | When semantic similarity is an effective way to determine where to route the input. | Semantic routing embeds both the query and, typically, a set of prompts, then chooses the appropriate prompt based upon similarity. |
:::tip

See our RAG from Scratch video on [routing](https://youtu.be/pfpIndq7Fi8?feature=shared).

:::
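
A minimal logical-routing sketch follows. It uses structured output to pick a datasource; the datasource names and model are assumptions for illustration, and the Pydantic import reflects the `pydantic_v1` shim used at this version of LangChain.

```python
from typing import Literal

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI

class RouteQuery(BaseModel):
    """Route a user query to the most relevant datasource."""
    datasource: Literal["python_docs", "js_docs"] = Field(
        description="The datasource most relevant to the user question"
    )

prompt = ChatPromptTemplate.from_messages(
    [("system", "Route the user question to the right datasource."), ("human", "{question}")]
)
# with_structured_output constrains the LLM to return a RouteQuery instance.
router = prompt | ChatOpenAI(model="gpt-4o-mini", temperature=0).with_structured_output(RouteQuery)

result = router.invoke({"question": "How do I use LCEL in langchain-core?"})
# result.datasource then selects which retriever / datastore to query.
```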
#### Query Construction

Third, consider whether any of your data sources require specific query formats. Many structured databases use SQL. Vector stores often have specific syntax for applying keyword filters to document metadata. **Using an LLM to convert a natural language query into a query syntax is a popular and powerful approach.**
In particular, [text-to-SQL](/docs/tutorials/sql_qa/), [text-to-Cypher](/docs/tutorials/graph/), and [query analysis for metadata filters](/docs/tutorials/query_analysis/#query-analysis) are useful ways to interact with structured, graph, and vector databases respectively.

| Name | When to Use | Description |
|------|-------------|-------------|
| [Text-to-SQL](/docs/tutorials/sql_qa/) | If users are asking questions that require information housed in a relational database, accessible via SQL. | This uses an LLM to transform user input into a SQL query. |
| [Text-to-Cypher](/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher. | This uses an LLM to transform user input into a Cypher query. |
| [Self Query](/docs/how_to/self_query/) | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, and (2) a metadata filter to go along with it. This is useful because questions are often about the *metadata* of documents (not the content itself). |
:::tip

See our [blog post overview](https://blog.langchain.dev/query-construction/) and RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared), the process of text-to-DSL, where DSL is a domain-specific language required to interact with a given database. This converts user questions into structured queries.

:::
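
For query analysis with metadata filters, a minimal self-query sketch looks like the following. It assumes an existing `vectorstore` of movie summaries with `year` and `genre` metadata (and the optional `lark` dependency installed); all names are illustrative.

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import ChatOpenAI

metadata_field_info = [
    AttributeInfo(name="year", description="The year the movie was released", type="integer"),
    AttributeInfo(name="genre", description="The genre of the movie", type="string"),
]

# The LLM turns a natural language question into (1) a semantic query string
# and (2) a structured metadata filter such as `year > 2000`.
retriever = SelfQueryRetriever.from_llm(
    llm=ChatOpenAI(temperature=0),
    vectorstore=vectorstore,  # assumed: pre-populated with movie summaries
    document_contents="Brief summary of a movie",
    metadata_field_info=metadata_field_info,
)

docs = retriever.invoke("Movies about dinosaurs released after 2000")
```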
#### Indexing

Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).

Many RAG approaches focus on splitting documents into chunks and retrieving some number of them based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set, and they affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.

Two approaches can address this tension: (1) the [Multi Vector](/docs/how_to/multi_vector/) retriever uses an LLM to translate documents into some form (e.g., often a summary) that is well-suited for indexing, but returns full documents to the LLM for generation. (2) The [ParentDocument](/docs/how_to/parent_document_retriever/) retriever embeds document chunks, but also returns full documents. The idea is to get the best of both worlds: use concise representations (summaries or chunks) for retrieval, but use the full documents for answer generation.
| Name | Index Type | Uses an LLM | When to Use | Description |
|------|------------|-------------|-------------|-------------|
| [Vector store](/docs/how_to/vectorstore_retriever/) | Vector store | No | If you are just getting started and looking for something quick and easy. | This is the simplest method and the one that is easiest to get started with. It involves creating embeddings for each piece of text. |
| [ParentDocument](/docs/how_to/parent_document_retriever/) | Vector store + Document Store | No | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). |
| [Multi Vector](/docs/how_to/multi_vector/) | Vector store + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself. | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions. |
| [Time-Weighted Vector store](/docs/how_to/time_weighted_vectorstore/) | Vector store | No | If you have timestamps associated with your documents, and you want to retrieve the most recent ones. | This fetches documents based on a combination of semantic similarity (as in normal vector retrieval) and recency (looking at timestamps of indexed documents). |
:::tip

- See our RAG from Scratch video on [indexing fundamentals](https://youtu.be/bjb_EMsTDKI?feature=shared)
- See our RAG from Scratch video on [multi vector retriever](https://youtu.be/gTCU9I6QqCE?feature=shared)

:::
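
To make the decoupling concrete, here is a minimal ParentDocument sketch: small chunks are embedded for precise search, while full parent documents are stored separately and returned for generation. The store choices and chunk size are illustrative.

```python
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

retriever = ParentDocumentRetriever(
    # Small chunks go in the vector store for fine-grained similarity search...
    vectorstore=Chroma(collection_name="chunks", embedding_function=OpenAIEmbeddings()),
    # ...while full parent documents live in a separate docstore.
    docstore=InMemoryStore(),
    child_splitter=RecursiveCharacterTextSplitter(chunk_size=400),
)

retriever.add_documents(docs)  # `docs` assumed: a list of Document objects
retrieved = retriever.invoke("some question")  # returns full parent documents
```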
Fifth, consider ways to improve the quality of your similarity search itself. Embedding models compress text into fixed-length (vector) representations that capture the semantic content of the document. This compression is useful for search / retrieval, but puts a heavy burden on that single vector representation to capture the semantic nuance / detail of the document. In some cases, irrelevant or redundant content can dilute the semantic usefulness of the embedding.

[ColBERT](https://docs.google.com/presentation/d/1IRhAdGjIevrrotdplHNcc4aXgIYyKamUKTWtB3m3aMU/edit?usp=sharing) is an interesting approach to address this with higher-granularity embeddings: (1) produce a contextually influenced embedding for each token in the document and query, (2) score the similarity between each query token and all document tokens, (3) take the max, (4) do this for all query tokens, and (5) take the sum of the max scores (from step 3) across all query tokens to get a query-document similarity score. This token-wise scoring can yield strong results.



There are some additional tricks to improve the quality of your retrieval. Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. Many [vector stores](/docs/integrations/retrievers/pinecone_hybrid_search/) offer built-in [hybrid search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://python.langchain.com/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/), which attempts to diversify the results of a search to avoid returning similar and redundant documents.
| Name | When to use | Description |
|-------------------|----------------------------------------------------------|-------------|
| [ColBERT](/docs/integrations/providers/ragatouille/#using-colbert-as-a-reranker) | When higher granularity embeddings are needed. | ColBERT uses contextually influenced embeddings for each token in the document and query to get a granular query-document similarity score. |
| [Hybrid search](/docs/integrations/retrievers/pinecone_hybrid_search/) | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. |
| [Maximal Marginal Relevance (MMR)](/docs/integrations/vectorstores/pinecone/#maximal-marginal-relevance-searches) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. |
:::tip

See our RAG from Scratch video on [ColBERT](https://youtu.be/cN6S0Ehm7_8?feature=shared).

:::
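
MMR in particular is usually a one-line change, since most vector stores expose it through `as_retriever`. A minimal sketch, assuming an existing `vectorstore`:

```python
# Fetch 20 candidates by similarity, then keep the 5 most diverse of them.
retriever = vectorstore.as_retriever(
    search_type="mmr",
    search_kwargs={"k": 5, "fetch_k": 20},
)
docs = retriever.invoke("How does LangChain support streaming?")
```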
#### Post-processing

Sixth, consider ways to filter or rank retrieved documents. This is very useful if you are [combining documents returned from multiple sources](/docs/integrations/retrievers/cohere-reranker/#doing-reranking-with-coherererank), since it can down-rank less relevant documents and / or [compress similar documents](/docs/how_to/contextual_compression/#more-built-in-compressors-filters).

| Name | Index Type | Uses an LLM | When to Use | Description |
|------|------------|-------------|-------------|-------------|
| [Contextual Compression](/docs/how_to/contextual_compression/) | Any | Sometimes | If you are finding that your retrieved documents contain too much irrelevant information and are distracting the LLM. | This puts a post-processing step on top of another retriever and extracts only the most relevant information from retrieved documents. This can be done with embeddings or an LLM. |
| [Ensemble](/docs/how_to/ensemble_retriever/) | Any | No | If you have multiple retrieval methods and want to try combining them. | This fetches documents from multiple retrievers and then combines them. |
| [Re-ranking](/docs/integrations/retrievers/cohere-reranker/) | Any | Yes | If you want to rank retrieved documents based upon relevance, especially if you want to combine results from multiple retrieval methods. | Given a query and a list of documents, re-ranking orders the documents from most to least semantically relevant to the query. |
:::tip

See our RAG from Scratch video on [RAG-Fusion](https://youtu.be/77qELPbNgxA?feature=shared), an approach for post-processing across multiple queries: rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and combine the ranks of the multiple search result lists to produce a single, unified ranking with [Reciprocal Rank Fusion (RRF)](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).

:::
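
A minimal ensemble sketch combining keyword and semantic retrieval is shown below; `EnsembleRetriever` fuses the ranked lists with Reciprocal Rank Fusion. It assumes an existing `vectorstore` and the `rank_bm25` package; the texts and weights are illustrative.

```python
from langchain.retrievers import EnsembleRetriever
from langchain_community.retrievers import BM25Retriever

bm25 = BM25Retriever.from_texts(
    ["RRF fuses multiple ranked lists.", "LangChain supports many retrievers."]
)
semantic = vectorstore.as_retriever()  # assumed: an existing vector store

# Results from both retrievers are combined with Reciprocal Rank Fusion.
ensemble = EnsembleRetriever(retrievers=[bm25, semantic], weights=[0.5, 0.5])
docs = ensemble.invoke("How are ranked result lists fused?")
```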
#### Generation

**Finally, consider ways to build self-correction into your RAG system.** RAG systems can suffer from low quality retrieval (e.g., if a user question is out of the domain for the index) and / or hallucinations in generation. A naive retrieve-generate pipeline has no ability to detect or self-correct these kinds of errors. The concept of ["flow engineering"](https://x.com/karpathy/status/1748043513156272416) has been introduced [in the context of code generation](https://arxiv.org/abs/2401.08500): iteratively build an answer to a code question with unit tests to check and self-correct errors. Several works have applied this to RAG, such as Self-RAG and Corrective-RAG. In both cases, checks for document relevance, hallucinations, and / or answer quality are performed in the RAG answer generation flow.

We've found that graphs are a great way to reliably express logical flows and have implemented ideas from several of these papers [using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag), as shown in the figure below (red - routing, blue - fallback, green - self-correction):

- **Routing:** Adaptive RAG ([paper](https://arxiv.org/abs/2403.14403)). Route questions to different retrieval approaches, as discussed above.
- **Fallback:** Corrective RAG ([paper](https://arxiv.org/pdf/2401.15884.pdf)). Fall back to web search if docs are not relevant to the query.
- **Self-correction:** Self-RAG ([paper](https://arxiv.org/abs/2310.11511)). Fix answers that contain hallucinations or fail to address the question.



| Name | When to use | Description |
|-------------------|-----------------------------------------------------------|-------------|
| Self-RAG | When needing to fix answers with hallucinations or irrelevant content. | Self-RAG performs checks for document relevance, hallucinations, and answer quality during the RAG answer generation flow, iteratively building an answer and self-correcting errors. |
| Corrective-RAG | When needing a fallback mechanism for low relevance docs. | Corrective-RAG includes a fallback (e.g., to web search) if the retrieved documents are not relevant to the query, ensuring higher quality and more relevant retrieval. |
:::tip

See several videos and cookbooks showcasing RAG with LangGraph:
- [LangGraph Corrective RAG](https://www.youtube.com/watch?v=E2shqsYwxck)
- [LangGraph combining Adaptive, Self-RAG, and Corrective RAG](https://www.youtube.com/watch?v=-ROS6gfYIts)
- [Cookbooks for RAG using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag)

See our LangGraph RAG recipes with partners:
- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/3p_integrations/langchain)
- [Mistral](https://github.com/mistralai/cookbook/tree/main/third_party/langchain)

:::
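
To show the shape of such a flow, here is a structural LangGraph skeleton in the spirit of Corrective-RAG. The node bodies are placeholders; real implementations would call a retriever, an LLM grader, a web search tool, and a generation chain.

```python
from typing import List, TypedDict

from langgraph.graph import END, StateGraph

class GraphState(TypedDict):
    question: str
    documents: List[str]
    generation: str

def retrieve(state: GraphState) -> dict:
    return {"documents": ["<retrieved doc>"]}  # placeholder: run your retriever

def grade_documents(state: GraphState) -> dict:
    return {"documents": state["documents"]}  # placeholder: keep only relevant docs

def web_search(state: GraphState) -> dict:
    return {"documents": ["<web result>"]}  # placeholder: fallback search

def generate(state: GraphState) -> dict:
    return {"generation": "<answer>"}  # placeholder: grounded generation

def decide(state: GraphState) -> str:
    # Fall back to web search when grading discarded every document.
    return "web_search" if not state["documents"] else "generate"

workflow = StateGraph(GraphState)
workflow.add_node("retrieve", retrieve)
workflow.add_node("grade_documents", grade_documents)
workflow.add_node("web_search", web_search)
workflow.add_node("generate", generate)
workflow.set_entry_point("retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges(
    "grade_documents", decide, {"web_search": "web_search", "generate": "generate"}
)
workflow.add_edge("web_search", "generate")
workflow.add_edge("generate", END)

app = workflow.compile()
result = app.invoke({"question": "example", "documents": [], "generation": ""})
```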
### Text splitting

@@ -900,8 +1188,31 @@ Table columns:

| Token | [many classes](/docs/how_to/split_by_token/) | Tokens | | Splits text on tokens. There exist a few different ways to measure tokens. |
| Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. |
| Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
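
For reference, the most common starting point in practice is the recursive splitter; a minimal sketch (chunk sizes are illustrative):

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,    # max characters per chunk
    chunk_overlap=50,  # overlap preserves context across chunk boundaries
)
chunks = splitter.split_text(long_text)  # `long_text` assumed: your document string
```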
### Evaluation
<span data-heading-keywords="evaluation,evaluate"></span>

Evaluation is the process of assessing the performance and effectiveness of your LLM-powered applications.
It involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose.
This process is vital for building reliable applications.



[LangSmith](https://docs.smith.langchain.com/) helps with this process in a few ways:

- It makes it easier to create and curate datasets via its tracing and annotation features
- It provides an evaluation framework that helps you define metrics and run your app against your dataset
- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/CD

To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
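
As a rough sketch of what running an evaluation looks like with the LangSmith SDK (the dataset name, target app, and evaluator here are illustrative, and the exact API may differ by SDK version):

```python
from langsmith import evaluate  # recent langsmith SDK versions

def exact_match(run, example):
    # Toy evaluator: compare the app's output to the reference answer.
    match = run.outputs["answer"] == example.outputs["answer"]
    return {"key": "exact_match", "score": int(match)}

results = evaluate(
    lambda inputs: {"answer": my_app(inputs["question"])},  # `my_app` assumed
    data="my-dataset",        # a dataset curated in LangSmith
    evaluators=[exact_match],
)
```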
### Tracing
<span data-heading-keywords="trace,tracing"></span>

A trace is essentially a series of steps that your application takes to go from input to output.
Traces contain individual steps called `runs`. These can be individual calls from a model, retriever, tool, or sub-chains.
Tracing gives you observability inside your chains and agents, and is vital for diagnosing issues.

For a deeper dive, check out [this LangSmith conceptual guide](https://docs.smith.langchain.com/concepts/tracing).
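
Enabling tracing is typically just environment configuration; a sketch (the key and project name are placeholders):

```python
import os

os.environ["LANGCHAIN_TRACING_V2"] = "true"        # turn on LangSmith tracing
os.environ["LANGCHAIN_API_KEY"] = "<your-api-key>"
os.environ["LANGCHAIN_PROJECT"] = "my-project"     # optional: group traces by project

# Any chain, model, retriever, or tool invocation from here on is recorded
# as runs within a trace in LangSmith.
```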
docs/docs/contributing/code/guidelines.mdx (new file, 35 lines)
@@ -0,0 +1,35 @@
# General guidelines

Here are some things to keep in mind for all types of contributions:

- Follow the ["fork and pull request"](https://docs.github.com/en/get-started/exploring-projects-on-github/contributing-to-a-project) workflow.
- Fill out the checked-in pull request template when opening pull requests. Note related issues and tag relevant maintainers.
- Ensure your PR passes formatting, linting, and testing checks before requesting a review.
  - If you would like comments or feedback on your current progress, please open an issue or discussion and tag a maintainer.
  - See the sections on [Testing](/docs/contributing/code/setup#testing) and [Formatting and Linting](/docs/contributing/code/setup#formatting-and-linting) for how to run these checks locally.
- Backwards compatibility is key. Your changes must not be breaking, except in the case of critical bug and security fixes.
- Look for duplicate PRs or issues that have already been opened before opening a new one.
- Keep scope as isolated as possible. As a general rule, your changes should not affect more than one package at a time.
## Bugfixes

We encourage and appreciate bugfixes. We ask that you:

- Explain the bug in enough detail for maintainers to be able to reproduce it.
- If an accompanying issue exists, link to it. Prefix with `Fixes` so that the issue will close automatically when the PR is merged.
- Avoid breaking changes if possible.
- Include unit tests that fail without the bugfix.

If you come across a bug and don't know how to fix it, we ask that you open an issue for it describing in detail the environment in which you encountered the bug.
## New features

We aim to keep the bar high for new features. We generally don't accept new core abstractions, changes to infra, changes to dependencies,
or new agents/chains from outside contributors without an existing GitHub discussion or issue that demonstrates an acute need for them.

- New features must come with docs, unit tests, and (if appropriate) integration tests.
- New integrations must come with docs, unit tests, and (if appropriate) integration tests.
  - See [this page](/docs/contributing/integrations) for more details on contributing new integrations.
- New functionality should not inherit from or use deprecated methods or classes.
- We will reject features that are likely to lead to security vulnerabilities or reports.
- Do not add any hard dependencies. Integrations may add optional dependencies.
docs/docs/contributing/code/index.mdx (new file, 6 lines)
@@ -0,0 +1,6 @@

# Contribute Code

If you would like to add a new feature or update an existing one, please read the resources below before getting started:

- [General guidelines](/docs/contributing/code/guidelines/)
- [Setup](/docs/contributing/code/setup/)
@@ -1,36 +1,9 @@
---
sidebar_position: 1
---

# Setup

To contribute to this project, please follow the ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
Please do not try to push directly to this repo unless you are a maintainer.

Please follow the checked-in pull request template when opening pull requests. Note related issues and tag relevant maintainers.

Pull requests cannot land without passing the formatting, linting, and testing checks first. See [Testing](#testing) and
[Formatting and Linting](#formatting-and-linting) for how to run these checks locally.

It's essential that we maintain great documentation and testing. If you:
- Fix a bug
  - Add a relevant unit or integration test when possible. These live in `tests/unit_tests` and `tests/integration_tests`.
- Make an improvement
  - Update any affected example notebooks and documentation. These live in `docs`.
  - Update unit and integration tests when relevant.
- Add a feature
  - Add a demo notebook in `docs/docs/`.
  - Add unit and integration tests.

We are a small, progress-oriented team. If there's something you'd like to add or change, opening a pull request is the
best way to get our attention.
## 🚀 Quick Start

This guide walks through how to run the repository locally and check in your first code.
For a [development container](https://containers.dev/), see the [.devcontainer folder](https://github.com/langchain-ai/langchain/tree/master/.devcontainer).

## Dependency Management: Poetry and other env/dependency managers

This project utilizes [Poetry](https://python-poetry.org/) v1.7.1+ as a dependency manager.
@@ -41,7 +14,7 @@ Install Poetry: **[documentation on how to install it](https://python-poetry.org

❗Note: If you use `Conda` or `Pyenv` as your environment/package manager, after installing Poetry,
tell Poetry to use the virtualenv python environment (`poetry config virtualenvs.prefer-active-python true`).

## Different packages

This repository contains multiple packages:
- `langchain-core`: Base interfaces for key abstractions as well as logic for combining them in chains (LangChain Expression Language).
@@ -59,7 +32,7 @@ For this quickstart, start with langchain-community:

```bash
cd libs/community
```

## Local Development Dependencies

Install langchain-community development requirements (for running langchain, running examples, linting, formatting, tests, and coverage):
@@ -79,9 +52,9 @@ If you are still seeing this bug on v1.6.1+, you may also try disabling "modern
(`poetry config installer.modern-installation false`) and re-installing requirements.
See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.

## Testing

**Note:** In `langchain`, `langchain-community`, and `langchain-experimental`, some test dependencies are optional. See the following section about optional dependencies.
Unit tests cover modular logic that does not require calls to outside APIs.
If you add new logic, please add a unit test.

@@ -118,11 +91,11 @@
```bash
poetry install --with test
make test
```
## Formatting and Linting

Run these locally before submitting a PR; the CI system will also check.

### Code Formatting

Formatting for this project is done via [ruff](https://docs.astral.sh/ruff/rules/).

@@ -174,7 +147,7 @@ This can be very helpful when you've made changes to only certain parts of the p

We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.

### Spellcheck

Spellchecking for this project is done via [codespell](https://github.com/codespell-project/codespell).
Note that `codespell` finds common typos, so it can produce both false positives (flagging correctly spelled but rarely used words) and false negatives (missing genuinely misspelled words).
@@ -1,2 +0,0 @@
label: 'Documentation'
position: 3

docs/docs/contributing/documentation/index.mdx (new file, 7 lines)
@@ -0,0 +1,7 @@
# Contribute Documentation

Documentation is a vital part of LangChain. We welcome both new documentation for new features and
community improvements to our current documentation. Please read the resources below before getting started:

- [Documentation style guide](/docs/contributing/documentation/style_guide/)
- [Setup](/docs/contributing/documentation/setup/)
@@ -1,4 +1,8 @@
---
sidebar_class_name: "hidden"
---

# Setup

LangChain documentation consists of two components:
@@ -12,8 +16,6 @@ used to generate the externally facing [API Reference](https://api.python.langch

The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
developers document their code well.

The `API Reference` is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/)
from the code and is hosted by [Read the Docs](https://readthedocs.org/).

@@ -29,7 +31,7 @@ The content for the main documentation is located in the `/docs` directory of th

The documentation is written using a combination of ipython notebooks (`.ipynb` files)
and markdown (`.mdx` files). The notebooks are converted to markdown
and then built using [Docusaurus 2](https://docusaurus.io/).
Feel free to make contributions to the main documentation! 🥰

@@ -48,10 +50,6 @@ locally to ensure that it looks good and is free of errors.
If you're unable to build it locally that's okay as well, as you will be able to
see a preview of the documentation on the pull request page.

From the **monorepo root**, run the following command to install the dependencies:

@@ -71,8 +69,6 @@ make docs_clean
```bash
make docs_clean
make api_docs_clean
```

Next, you can build the documentation as outlined below:
@@ -1,10 +1,8 @@
---
sidebar_label: "Style guide"
sidebar_class_name: "hidden"
---

# Documentation Style Guide

As LangChain continues to grow, the surface area of documentation required to cover it continues to grow too.
This page provides guidelines for anyone writing documentation for LangChain, as well as some of our philosophies around
@@ -12,116 +10,139 @@ organization and structure.

## Philosophy

LangChain's documentation follows the [Diataxis framework](https://diataxis.fr).
Under this framework, all documentation falls under one of four categories: [Tutorials](/docs/contributing/documentation/style_guide/#tutorials),
[How-to guides](/docs/contributing/documentation/style_guide/#how-to-guides),
[References](/docs/contributing/documentation/style_guide/#references), and [Explanations](/docs/contributing/documentation/style_guide/#conceptual-guide).
### Tutorials

Tutorials are lessons that take the reader through a practical activity. Their purpose is to help the user
gain understanding of concepts and how they interact by showing one way to achieve some goal in a hands-on way. They should **avoid** giving
multiple permutations of ways to achieve that goal in-depth. Instead, they should guide a new user through a recommended path to accomplishing the tutorial's goal. While the end result of a tutorial does not necessarily need to
be completely production-ready, it should be useful and practically satisfy the goal that you clearly stated in the tutorial's introduction. Information on how to address additional scenarios
belongs in how-to guides.

To quote the Diataxis website:

> A tutorial serves the user’s *acquisition* of skills and knowledge - their study. Its purpose is not to help the user get something done, but to help them learn.

In LangChain, these are often higher level guides that show off end-to-end use cases.

Some examples include:

- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain/)
- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)

A good structural rule of thumb is to follow the structure of this [example from Numpy](https://numpy.org/numpy-tutorials/content/tutorial-svd.html).

Here are some high-level tips on writing a good tutorial:

- Focus on guiding the user to get something done, but keep in mind the end-goal is more to impart principles than to create a perfect production system.
- Be specific, not abstract, and follow one path.
  - No need to go deeply into alternative approaches, but it’s ok to reference them, ideally with a link to an appropriate how-to guide.
- Get "a point on the board" as soon as possible - something the user can run that outputs something.
  - You can iterate and expand afterwards.
  - Try to frequently checkpoint at given steps where the user can run code and see progress.
- Focus on results, not technical explanation.
  - Crosslink heavily to appropriate conceptual/reference pages.
- The first time you mention a LangChain concept, use its full name (e.g. "LangChain Expression Language (LCEL)"), and link to its conceptual/other documentation page.
  - It's also helpful to add a prerequisite callout that links to any pages with necessary background information.
- End with a recap/next steps section summarizing what the tutorial covered and future reading, such as related how-to guides.
### How-to guides

A how-to guide, as the name implies, demonstrates how to do something discrete and specific.
It should assume that the user is already familiar with underlying concepts, and is trying to solve an immediate problem, but
should still give some background or list the scenarios where the information contained within can be relevant.
They can and should discuss alternatives if one approach may be better than another in certain cases.

To quote the Diataxis website:

> A how-to guide serves the work of the already-competent user, whom you can assume to know what they want to do, and to be able to follow your instructions correctly.

Some examples include:

- [How to: return structured data from a model](/docs/how_to/structured_output/)
- [How to: write a custom chat model](/docs/how_to/custom_chat_model/)

Here are some high-level tips on writing a good how-to guide:

- Clearly explain what you are guiding the user through at the start.
- Assume higher intent than a tutorial and show what the user needs to do to get that task done.
- Assume familiarity with concepts, but explain why suggested actions are helpful.
  - Crosslink heavily to conceptual/reference pages.
- Discuss alternatives and responses to real-world tradeoffs that may arise when solving a problem.
- Use lots of example code.
  - Prefer full code blocks that the reader can copy and run.
- End with a recap/next steps section summarizing what the guide covered and future reading, such as other related how-to guides.
### Conceptual guide

LangChain's conceptual guides fall under the **Explanation** quadrant of Diataxis. They should cover LangChain terms and concepts
in a more abstract way than how-to guides or tutorials, and should be geared towards curious users interested in
gaining a deeper understanding of the framework. Try to avoid excessively large code examples - the goal here is to
impart perspective to the user rather than to finish a practical project. These guides should cover **why** things work the way they do.

This guide on documentation style is meant to fall under this category.

To quote the Diataxis website:

> The perspective of explanation is higher and wider than that of the other types. It does not take the user’s eye-level view, as in a how-to guide, or a close-up view of the machinery, like reference material. Its scope in each case is a topic - “an area of knowledge”, that somehow has to be bounded in a reasonable, meaningful way.

Some examples include:

- [Retrieval conceptual docs](/docs/concepts/#retrieval)
- [Chat model conceptual docs](/docs/concepts/#chat-models)

Here are some high-level tips on writing a good conceptual guide:

- Explain design decisions. Why does concept X exist and why was it designed this way?
- Use analogies and reference other concepts and alternatives.
- Avoid blending in too much reference content.
- You can and should reference content covered in other guides, but make sure to link to them.
### References

References contain detailed, low-level information that describes exactly what functionality exists and how to use it.
In LangChain, this is mainly our API reference pages, which are populated from docstrings within code.
Reference pages are generally not read end-to-end, but are consulted as necessary when a user needs to know
how to use something specific.

To quote the Diataxis website:

> The only purpose of a reference guide is to describe, as succinctly as possible, and in an orderly way. Whereas the content of tutorials and how-to guides are led by needs of the user, reference material is led by the product it describes.

Many of the reference pages in LangChain are automatically generated from code,
but here are some high-level tips on writing a good docstring:

- Be concise.
- Discuss special cases and deviations from a user's expectations.
- Go into detail on required inputs and outputs.
- Light details on when one might use the feature are fine, but in-depth details belong in other sections.

Each category serves a distinct purpose and requires a specific approach to writing and structuring the content.
## Taxonomy

Keeping the above in mind, we have sorted LangChain's docs into categories. It is helpful to think in these terms
when contributing new documentation:

### Getting started

The [getting started section](/docs/introduction) includes a high-level introduction to LangChain, a quickstart that
tours LangChain's various features, and logistical instructions around installation and project setup.

It contains elements of **How-to guides** and **Explanations**.

### Use cases

[Use cases](/docs/how_to#use-cases) are guides that are meant to show how to use LangChain to accomplish a specific task (RAG, information extraction, etc.).
The quickstarts should be good entrypoints for first-time LangChain developers who prefer to learn by getting something practical prototyped,
then taking the pieces apart retrospectively. These should mirror what LangChain is good at.

The quickstart pages here should fit the **How-to guide** category, with the other pages intended to be **Explanations** of more
in-depth concepts and strategies that accompany the main happy paths.

:::note
The below sections are listed roughly in order of increasing level of abstraction.
:::
### Expression Language

[LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language-lcel) is the fundamental way that most LangChain components fit together, and this section is designed to teach
developers how to use it to build with LangChain's primitives effectively.

This section should contain **Tutorials** that teach how to stream and use LCEL primitives for more abstract tasks, **Explanations** of specific behaviors,
and some **References** for how to use different methods in the Runnable interface.
### Components

The [components section](/docs/concepts) covers concepts one level of abstraction higher than LCEL.
Abstract base classes like `BaseChatModel` and `BaseRetriever` should be covered here, as well as core implementations of these base classes,
such as `ChatPromptTemplate` and `RecursiveCharacterTextSplitter`. Customization guides belong here too.

This section should contain mostly conceptual **Tutorials**, **References**, and **Explanations** of the components they cover.

:::note
As a general rule of thumb, everything covered in the `Expression Language` and `Components` sections (with the exception of the `Composition` section of components) should
cover only components that exist in `langchain_core`.
:::
### Integrations

The [integrations](/docs/integrations/platforms/) are specific implementations of components. These often involve third-party APIs and services.
If this is the case, as a general rule, these are maintained by the third-party partner.

This section should contain mostly **Explanations** and **References**, though the actual content here is more flexible than other sections and more at the
discretion of the third-party provider.

:::note
Concepts covered in `Integrations` should generally exist in `langchain_community` or specific partner packages.
:::
### Guides and Ecosystem

The [Guides](/docs/tutorials) and [Ecosystem](https://docs.smith.langchain.com/) sections should contain guides that address higher-level problems than the sections above.
This includes, but is not limited to, considerations around productionization and development workflows.

These should contain mostly **How-to guides**, **Explanations**, and **Tutorials**.

### API references

LangChain's API references. These should act as **References** (as the name implies) with some **Explanation**-focused content as well.
## Sample developer journey

We have set up our docs to assist a new developer to LangChain. Let's walk through the intended path:

- The developer lands on https://python.langchain.com, and reads through the introduction and the diagram.
- If they are just curious, they may be drawn to the [Quickstart](/docs/tutorials/llm_chain) to get a high-level tour of what LangChain contains.
- If they have a specific task in mind that they want to accomplish, they will be drawn to the Use-Case section. The use-case should provide a good, concrete hook that shows the value LangChain can provide them and be a good entrypoint to the framework.
- They can then move on to learn more about the fundamentals of LangChain through the Expression Language sections.
- Next, they can learn about LangChain's various components and integrations.
- Finally, they can get additional knowledge through the Guides.

This is only an ideal of course - sections will inevitably reference lower or higher-level concepts that are documented in other sections.
## General guidelines

Here are some other guidelines you should think about when writing and organizing documentation.

We generally do not merge new tutorials from outside contributors without an acute need.
We welcome updates as well as new integration docs, how-tos, and references.
### Avoid duplication

Multiple pages that cover the same material in depth are difficult to maintain and cause confusion. There should
be only one (very rarely two) canonical page for a given concept or feature. Instead, you should link to other guides.

### Link to other sections

Because sections of the docs do not exist in a vacuum, it is important to link to other sections as often as possible
to allow a developer to learn more about an unfamiliar topic inline.

This includes linking to the API references as well as conceptual sections!

### Be concise

In general, take a less-is-more approach. If a section with a good explanation of a concept already exists, you should link to it rather than
re-explain it, unless the concept you are documenting presents some new wrinkle.
@@ -130,9 +151,10 @@ Be concise, including in code samples.

### General style

- Use active voice and present tense whenever possible
- Use examples and code snippets to illustrate concepts and usage
- Use appropriate header levels (`#`, `##`, `###`, etc.) to organize the content hierarchically
- Use fewer cells with more code to make copy/paste easier
- Use bullet points and numbered lists to break down information into easily digestible chunks
- Use tables (especially for **Reference** sections) and diagrams often to present information visually
- Include the table of contents for longer documentation pages to help readers navigate the content, but hide it for shorter pages
@@ -12,8 +12,8 @@ As an open-source project in a rapidly developing field, we are extremely open t

There are many ways to contribute to LangChain. Here are some common ways people contribute:

- [**Documentation**](/docs/contributing/documentation/): Help improve our docs, including this one!
- [**Code**](/docs/contributing/code/): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools.
- [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.
@@ -1,6 +1,7 @@
---
sidebar_position: 5
---

# Contribute Integrations

To begin, make sure you have all the dependencies outlined in the guide on [Contributing Code](/docs/contributing/code/).

@@ -10,7 +11,7 @@ There are a few different places you can contribute integrations for LangChain:

- **Community**: For lighter-weight integrations that are primarily maintained by LangChain and the Open Source Community.
- **Partner Packages**: For independent packages that are co-maintained by LangChain and a partner.

For the most part, **new integrations should be added to the Community package**. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.

In the following sections, we'll walk through how to contribute to each of these packages from a fake company, `Parrot Link AI`.
@@ -59,6 +60,10 @@ And add documentation to:

## Partner package in LangChain repo

:::caution
Before starting a **partner** package, please confirm your intent with the LangChain team. Partner packages require more maintenance as separate packages, so we will close PRs that add new partner packages without prior discussion. See the above section for how to add a community integration.
:::

Partner packages can be hosted in the `LangChain` monorepo or in an external repo.

A partner package in the `LangChain` repo is placed in `libs/partners/{partner}`.
@@ -7,6 +7,7 @@ If you plan on contributing to LangChain code or documentation, it can be useful
to understand the high level structure of the repository.

LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
You can check out our [installation guide](/docs/how_to/installation/) for more on how they fit together.

Here's the structure visualized as a tree:

@@ -51,7 +52,7 @@ There are other files in the root directory level, but their presence should be

The `/docs` directory contains the content for the documentation that is shown
at https://python.langchain.com/ and the associated API Reference https://api.python.langchain.com/en/latest/langchain_api_reference.html.

See the [documentation](/docs/contributing/documentation/) guidelines to learn how to contribute to the documentation.
## Code

@@ -59,6 +60,6 @@ The `/libs` directory contains the code for the LangChain packages.

To learn more about how to contribute code see the following guidelines:

- [Code](/docs/contributing/code/): Learn how to develop in the LangChain codebase.
- [Integrations](./integrations.mdx): Learn how to contribute to third-party integrations to `langchain-community` or to start a new partner package.
- [Testing](./testing.mdx): Guidelines to learn how to write tests for the packages.
@@ -1,5 +1,5 @@
---
sidebar_position: 6
---

# Testing
@@ -153,7 +153,7 @@
"\n",
" def parse(self, text: str) -> List[str]:\n",
" lines = text.strip().split(\"\\n\")\n",
" return list(filter(None, lines)) # Remove empty lines\n",
"\n",
"\n",
"output_parser = LineListOutputParser()\n",
@@ -23,7 +23,7 @@
"This guide assumes familiarity with the following concepts:\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
"- [Chaining runnables](/docs/how_to/sequence/)\n",
"- [Tool calling](/docs/how_to/tool_calling/)\n",
"- [Tool calling](/docs/how_to/tool_calling)\n",
"\n",
":::\n",
"\n",
@@ -142,7 +142,7 @@
"\n",
"## Attaching OpenAI tools\n",
"\n",
"Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling/) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:"
"Another common use-case is tool calling. While you should generally use the [`.bind_tools()`](/docs/how_to/tool_calling) method for tool-calling models, you can also bind provider-specific args directly if you want lower level control:"
]
},
{
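Since the cell above ends mid-thought in this diff, here is a hedged sketch of binding provider-specific args directly (assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set; the tool definition is illustrative, not from the diff):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o")

# .bind() attaches raw provider kwargs to every subsequent call; here an
# OpenAI-format tool schema is passed through instead of using .bind_tools().
llm_with_tools = llm.bind(
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        }
                    },
                    "required": ["location"],
                },
            },
        }
    ]
)

# llm_with_tools.invoke("What's the weather in San Francisco?")
```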
342 docs/docs/how_to/callbacks_custom_events.ipynb Normal file
@@ -0,0 +1,342 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to dispatch custom callback events\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"- [Astream Events API](/docs/concepts/#astream_events): the `astream_events` method will surface custom callback events.\n",
":::\n",
"\n",
"In some situations, you may want to dispatch a custom callback event from within a [Runnable](/docs/concepts/#runnable-interface) so it can be surfaced\n",
"in a custom callback handler or via the [Astream Events API](/docs/concepts/#astream_events).\n",
"\n",
"For example, if you have a long running tool with multiple steps, you can dispatch custom events between the steps and use these custom events to monitor progress.\n",
"You could also surface these custom events to an end user of your application to show them how the current task is progressing.\n",
"\n",
"To dispatch a custom event you need to decide on two attributes for the event: the `name` and the `data`.\n",
"\n",
"| Attribute | Type | Description |\n",
"|-----------|------|----------------------------------------------------------------------------------------------------------|\n",
"| name | str | A user defined name for the event. |\n",
"| data | Any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |\n",
"\n",
"\n",
":::{.callout-important}\n",
"* Dispatching custom callback events requires `langchain-core>=0.2.15`.\n",
"* Custom callback events can only be dispatched from within an existing `Runnable`.\n",
"* If using `astream_events`, you must use `version='v2'` to see custom events.\n",
"* Sending or rendering custom callback events in LangSmith is not yet supported.\n",
":::\n",
"\n",
"\n",
":::caution COMPATIBILITY\n",
"LangChain cannot automatically propagate configuration, including callbacks necessary for astream_events(), to child runnables if you are running async code in python<=3.10. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
"\n",
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
"\n",
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environments. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in other Python versions.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain-core"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Astream Events API\n",
"\n",
"The most useful way to consume custom events is via the [Astream Events API](/docs/concepts/#astream_events).\n",
"\n",
"We can use the `async` `adispatch_custom_event` API to emit custom events in an async setting. \n",
"\n",
"\n",
":::{.callout-important}\n",
"\n",
"To see custom events via the astream events API, you need to use the newer `v2` API of `astream_events`.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'foo', 'tags': [], 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'metadata': {}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
"{'event': 'on_chain_stream', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
]
}
],
"source": [
"from langchain_core.callbacks.manager import (\n",
"    adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def foo(x: str) -> str:\n",
"    await adispatch_custom_event(\"event1\", {\"x\": x})\n",
"    await adispatch_custom_event(\"event2\", 5)\n",
"    return x\n",
"\n",
"\n",
"async for event in foo.astream_events(\"hello world\", version=\"v2\"):\n",
"    print(event)"
]
},
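In practice you usually care only about your own events rather than the full event stream. A small hedged variation on the loop above (same `foo` as in the cell) that filters for them:

```python
# Only surface the custom events dispatched inside `foo`, ignoring the
# built-in on_chain_start / on_chain_stream / on_chain_end events.
async for event in foo.astream_events("hello world", version="v2"):
    if event["event"] == "on_custom_event":
        print(event["name"], event["data"])
# Expected, based on the output above:
# event1 {'x': 'hello world'}
# event2 5
```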
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In python <= 3.10, you must propagate the config manually!"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'bar', 'tags': [], 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'metadata': {}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
"{'event': 'on_chain_stream', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
]
}
],
"source": [
"from langchain_core.callbacks.manager import (\n",
"    adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def bar(x: str, config: RunnableConfig) -> str:\n",
"    \"\"\"An example that shows how to manually propagate config.\n",
"\n",
"    You must do this if you're running python<=3.10.\n",
"    \"\"\"\n",
"    await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
"    await adispatch_custom_event(\"event2\", 5, config=config)\n",
"    return x\n",
"\n",
"\n",
"async for event in bar.astream_events(\"hello world\", version=\"v2\"):\n",
"    print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Async Callback Handler\n",
"\n",
"You can also consume the dispatched event via an async callback handler."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n",
"Received event event2 with data: 5, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n"
]
},
{
"data": {
"text/plain": [
"1"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List, Optional\n",
"from uuid import UUID\n",
"\n",
"from langchain_core.callbacks import AsyncCallbackHandler\n",
"from langchain_core.callbacks.manager import (\n",
"    adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"class AsyncCustomCallbackHandler(AsyncCallbackHandler):\n",
"    async def on_custom_event(\n",
"        self,\n",
"        name: str,\n",
"        data: Any,\n",
"        *,\n",
"        run_id: UUID,\n",
"        tags: Optional[List[str]] = None,\n",
"        metadata: Optional[Dict[str, Any]] = None,\n",
"        **kwargs: Any,\n",
"    ) -> None:\n",
"        print(\n",
"            f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
"        )\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def bar(x: str, config: RunnableConfig) -> str:\n",
"    \"\"\"An example that shows how to manually propagate config.\n",
"\n",
"    You must do this if you're running python<=3.10.\n",
"    \"\"\"\n",
"    await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
"    await adispatch_custom_event(\"event2\", 5, config=config)\n",
"    return x\n",
"\n",
"\n",
"async_handler = AsyncCustomCallbackHandler()\n",
"await bar.ainvoke(1, {\"callbacks\": [async_handler], \"tags\": [\"foo\", \"bar\"]})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sync Callback Handler\n",
"\n",
"Let's see how to emit custom events in a sync environment using `dispatch_custom_event`.\n",
"\n",
"You **must** call `dispatch_custom_event` from within an existing `Runnable`."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n",
"Received event event2 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n"
]
},
{
"data": {
"text/plain": [
"1"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List, Optional\n",
"from uuid import UUID\n",
"\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.callbacks.manager import (\n",
"    dispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"class CustomHandler(BaseCallbackHandler):\n",
"    def on_custom_event(\n",
"        self,\n",
"        name: str,\n",
"        data: Any,\n",
"        *,\n",
"        run_id: UUID,\n",
"        tags: Optional[List[str]] = None,\n",
"        metadata: Optional[Dict[str, Any]] = None,\n",
"        **kwargs: Any,\n",
"    ) -> None:\n",
"        print(\n",
"            f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
"        )\n",
"\n",
"\n",
"@RunnableLambda\n",
"def foo(x: int, config: RunnableConfig) -> int:\n",
"    dispatch_custom_event(\"event1\", {\"x\": x})\n",
"    dispatch_custom_event(\"event2\", {\"x\": x})\n",
"    return x\n",
"\n",
"\n",
"handler = CustomHandler()\n",
"foo.invoke(1, {\"callbacks\": [handler], \"tags\": [\"foo\", \"bar\"]})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've seen how to emit custom events. You can check out the more in-depth guide for [astream events](/docs/how_to/streaming/#using-stream-events), which is the easiest way to leverage custom events."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -5,7 +5,7 @@
"id": "cfdf4f09-8125-4ed1-8063-6feed57da8a3",
"metadata": {},
"source": [
"# How to let your end users choose their model\n",
"# How to init any model in one line\n",
"\n",
"Many LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different ChatModels based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.\n",
"\n",
@@ -15,6 +15,12 @@
"\n",
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
"\n",
":::\n",
"\n",
":::info Requires ``langchain >= 0.2.8``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.8``. Please make sure your package is up to date.\n",
"\n",
":::"
]
},
@@ -25,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai"
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
]
},
{
@@ -76,32 +82,6 @@
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
]
},
{
"cell_type": "markdown",
"id": "fff9a4c8-b6ee-4a1a-8d3d-0ecaa312d4ed",
"metadata": {},
"source": [
"## Simple config example"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75c25d39-bf47-4b51-a6c6-64d9c572bfd6",
"metadata": {},
"outputs": [],
"source": [
"user_config = {\n",
"    \"model\": \"...user-specified...\",\n",
"    \"model_provider\": \"...user-specified...\",\n",
"    \"temperature\": 0,\n",
"    \"max_tokens\": 1000,\n",
"}\n",
"\n",
"llm = init_chat_model(**user_config)\n",
"llm.invoke(\"what's your name\")"
]
},
{
"cell_type": "markdown",
"id": "f811f219-5e78-4b62-b495-915d52a22532",
@@ -125,12 +105,215 @@
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "da07b5c0-d2e6-42e4-bfcd-2efcfaae6221",
"cell_type": "markdown",
"id": "476a44db-c50d-4846-951d-0f1c9ba8bbaa",
"metadata": {},
"outputs": [],
"source": []
"source": [
"## Creating a configurable model\n",
"\n",
"You can also create a runtime-configurable model by specifying `configurable_fields`. If you don't specify a `model` value, then \"model\" and \"model_provider\" will be configurable by default."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_d576307f90', 'finish_reason': 'stop', 'logprobs': None}, id='run-5428ab5c-b5c0-46de-9946-5d4ca40dbdc8-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"configurable_model = init_chat_model(temperature=0)\n",
"\n",
"configurable_model.invoke(\n",
"    \"what's your name\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
")"
]
},
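A hedged follow-up sketch (assuming the same `configurable_model` as above and the relevant provider packages and API keys): instead of passing `config` on every call, you can pin a configurable model to fixed values with `with_config`, which returns a new runnable with the choice baked in.

```python
# with_config(configurable={...}) fixes the configurable fields, so
# downstream code can invoke the result without passing config each time.
gpt_4o = configurable_model.with_config(configurable={"model": "gpt-4o"})
claude = configurable_model.with_config(
    configurable={"model": "claude-3-5-sonnet-20240620"}
)

# gpt_4o.invoke("what's your name")
# claude.invoke("what's your name")
```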
{
"cell_type": "code",
"execution_count": 6,
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_012XvotUJ3kGLXJUWKBVxJUi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-1ad1eefe-f1c6-4244-8bc6-90e2cb7ee554-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"configurable_model.invoke(\n",
"    \"what's your name\", config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}}\n",
")"
]
},
{
"cell_type": "markdown",
"id": "7f3b3d4a-4066-45e4-8297-ea81ac8e70b7",
"metadata": {},
"source": [
"### Configurable model with default values\n",
"\n",
"We can create a configurable model with default model values, specify which parameters are configurable, and add prefixes to configurable params:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "814a2289-d0db-401e-b555-d5116112b413",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_ce0793330f', 'finish_reason': 'stop', 'logprobs': None}, id='run-3923e328-7715-4cd6-b215-98e4b6bf7c9d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"first_llm = init_chat_model(\n",
"    model=\"gpt-4o\",\n",
"    temperature=0,\n",
"    configurable_fields=(\"model\", \"model_provider\", \"temperature\", \"max_tokens\"),\n",
"    config_prefix=\"first\",  # useful when you have a chain with multiple models\n",
")\n",
"\n",
"first_llm.invoke(\"what's your name\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "6c8755ba-c001-4f5a-a497-be3f1db83244",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_01RyYR64DoMPNCfHeNnroMXm', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-22446159-3723-43e6-88df-b84797e7751d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"first_llm.invoke(\n",
"    \"what's your name\",\n",
"    config={\n",
"        \"configurable\": {\n",
"            \"first_model\": \"claude-3-5-sonnet-20240620\",\n",
"            \"first_temperature\": 0.5,\n",
"            \"first_max_tokens\": 100,\n",
"        }\n",
"    },\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0072b1a3-7e44-4b4e-8b07-efe1ba91a689",
"metadata": {},
"source": [
"### Using a configurable model declaratively\n",
"\n",
"We can call declarative operations like `bind_tools`, `with_structured_output`, `with_config`, etc. on a configurable model and chain a configurable model in the same way that we would a regularly instantiated chat model object."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "067dabee-1050-4110-ae24-c48eba01e13b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
"  'args': {'location': 'Los Angeles, CA'},\n",
"  'id': 'call_sYT3PFMufHGWJD32Hi2CTNUP'},\n",
" {'name': 'GetPopulation',\n",
"  'args': {'location': 'New York, NY'},\n",
"  'id': 'call_j1qjhxRnD3ffQmRyqjlI1Lnk'}]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
"    \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
"    location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"class GetPopulation(BaseModel):\n",
"    \"\"\"Get the current population in a given location\"\"\"\n",
"\n",
"    location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm = init_chat_model(temperature=0)\n",
"llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])\n",
"\n",
"llm_with_tools.invoke(\n",
"    \"what's bigger in 2024 LA or NYC\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
").tool_calls"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e57dfe9f-cd24-4e37-9ce9-ccf8daf78f89",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
"  'args': {'location': 'Los Angeles, CA'},\n",
"  'id': 'toolu_01CxEHxKtVbLBrvzFS7GQ5xR'},\n",
" {'name': 'GetPopulation',\n",
"  'args': {'location': 'New York City, NY'},\n",
"  'id': 'toolu_013A79qt5toWSsKunFBDZd5S'}]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_tools.invoke(\n",
"    \"what's bigger in 2024 LA or NYC\",\n",
"    config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}},\n",
").tool_calls"
]
}
],
"metadata": {
@@ -149,7 +332,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.9"
}
},
"nbformat": 4,
@@ -16,7 +16,7 @@
"\n",
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
"\n",
"This guide requires `langchain-openai >= 0.1.8`."
"This guide requires `langchain-openai >= 0.1.9`."
]
},
{
@@ -153,7 +153,7 @@
"\n",
"#### OpenAI\n",
"\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={\"include_usage\": True}`.\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"\n",
"```{=mdx}\n",
":::note\n",
@@ -172,18 +172,18 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='Hello' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='!' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' How' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' can' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' I' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' assist' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
]
}
],
@@ -191,7 +191,7 @@
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"\n",
"aggregate = None\n",
"for chunk in llm.stream(\"hello\", stream_options={\"include_usage\": True}):\n",
"for chunk in llm.stream(\"hello\", stream_usage=True):\n",
"    print(chunk)\n",
"    aggregate = chunk if aggregate is None else aggregate + chunk"
]
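A small hedged follow-up to the loop in the hunk above (same `llm` and `aggregate` as in that cell): once the stream is exhausted, the aggregated chunk carries the usage totals reported in the final usage chunk.

```python
# `aggregate` is the sum of all AIMessageChunks; with stream_usage=True the
# last chunk contributes usage_metadata, so the totals survive aggregation.
print(aggregate.content)
print(aggregate.usage_metadata)
# e.g. {'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}
```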
@@ -229,7 +229,7 @@
"id": "7dba63e8-0ed7-4533-8f0f-78e19c38a25c",
"metadata": {},
"source": [
"To disable streaming token counts for OpenAI, set `\"include_usage\"` to False in `stream_options`, or omit it from the parameters:"
"To disable streaming token counts for OpenAI, set `stream_usage` to False, or omit it from the parameters:"
]
},
{
@@ -242,17 +242,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n"
"content='' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='Hello' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='!' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' How' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' can' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' I' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' assist' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
]
}
],
@@ -267,7 +267,7 @@
"id": "6a5d9617-be3a-419a-9276-de9c29fa50ae",
"metadata": {},
"source": [
"You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"You can also enable streaming token usage by setting `stream_usage` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"\n",
"See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps."
]
@@ -275,7 +275,7 @@
{
"cell_type": "code",
"execution_count": 8,
"id": "57dec1fb-bd9c-4c98-8798-8fbbe67f6b2c",
"id": "0b1523d8-127e-4314-82fa-bd97aca37f9a",
"metadata": {},
"outputs": [
{
@@ -301,7 +301,7 @@
"\n",
"llm = ChatOpenAI(\n",
"    model=\"gpt-3.5-turbo-0125\",\n",
"    model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
"    stream_usage=True,\n",
")\n",
"# Under the hood, .with_structured_output binds tools to the\n",
"# chat model and appends a parser.\n",
@@ -341,7 +341,7 @@
{
"cell_type": "code",
"execution_count": 9,
"id": "31667d54",
"id": "b04a4486-72fd-48ce-8f9e-5d281b441195",
"metadata": {},
"outputs": [
{
@@ -361,7 +361,11 @@
"\n",
"from langchain_community.callbacks.manager import get_openai_callback\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(\n",
"    model=\"gpt-3.5-turbo-0125\",\n",
"    temperature=0,\n",
"    stream_usage=True,\n",
")\n",
"\n",
"with get_openai_callback() as cb:\n",
"    result = llm.invoke(\"Tell me a joke\")\n",
@@ -379,14 +383,14 @@
{
"cell_type": "code",
"execution_count": 10,
"id": "e09420f4",
"id": "05f22a1d-b021-490f-8840-f628a07459f2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"55\n"
"54\n"
]
}
],
@@ -397,37 +401,29 @@
"    print(cb.total_tokens)"
]
},
{
"cell_type": "markdown",
"id": "9ac51188-c8f4-4230-90fd-3cd78cdd955d",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available:\n",
":::\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "b241069a-265d-4497-af34-b0a5f95ae67f",
"id": "c00c9158-7bb4-4279-88e6-ea70f46e6ac2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"28\n"
"Tokens Used: 27\n",
"\tPrompt Tokens: 11\n",
"\tCompletion Tokens: 16\n",
"Successful Requests: 1\n",
"Total Cost (USD): $2.95e-05\n"
]
}
],
"source": [
"with get_openai_callback() as cb:\n",
"    for chunk in llm.stream(\"Tell me a joke\", stream_options={\"include_usage\": True}):\n",
"    for chunk in llm.stream(\"Tell me a joke\"):\n",
"        pass\n",
"    print(cb.total_tokens)"
"    print(cb)"
]
},
{
@@ -457,21 +453,7 @@
")\n",
"tools = load_tools([\"wikipedia\"])\n",
"agent = create_tool_calling_agent(llm, tools, prompt)\n",
"agent_executor = AgentExecutor(\n",
"    agent=agent, tools=tools, verbose=True, stream_runnable=False\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9c1ae74d-8300-4041-9ff4-66093ee592b1",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events.\n",
":::\n",
"```"
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
@@ -503,36 +485,30 @@
"\n",
"\n",
"\n",
"Page: Anna's hummingbird\n",
"Summary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.\n",
"It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.\n",
"These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Page: Allen's hummingbird\n",
"Summary: Allen's hummingbird (Selasphorus sasin) is a species of hummingbird that breeds in the western United States. It is one of seven species in the genus Selasphorus.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
"\n",
"\n",
"\n",
"Page: Fastest animals\n",
"Summary: This is a list of the fastest animals in the world, by types of animal.\n",
"\n",
"\n",
"\n",
"Page: Falcon\n",
"Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
"Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
"The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
"The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
"Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.\u001b[0m\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species in level flight is the common swift, which holds the record for the fastest confirmed level flight by a bird at 111.5 km/h (69.3 mph). The peregrine falcon is known to exceed speeds of 320 km/h (200 mph) in its dives, making it the fastest bird in terms of diving speed.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Total Tokens: 1787\n",
"Prompt Tokens: 1687\n",
"Completion Tokens: 100\n",
"Total Cost (USD): $0.0009935\n"
"Total Tokens: 1675\n",
"Prompt Tokens: 1538\n",
"Completion Tokens: 137\n",
"Total Cost (USD): $0.0009745000000000001\n"
]
}
],
@@ -71,13 +71,13 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")"
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")"
]
},
{
@@ -95,19 +95,15 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='I said \"J\\'adore la programmation,\" which means \"I love programming\" in French.')"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"I said \"J'adore la programmation,\" which means \"I love programming\" in French.\n"
]
}
],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
@@ -115,23 +111,25 @@
"            \"system\",\n",
"            \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
"        ),\n",
"        MessagesPlaceholder(variable_name=\"messages\"),\n",
"        (\"placeholder\", \"{messages}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | chat\n",
"\n",
"chain.invoke(\n",
"ai_msg = chain.invoke(\n",
"    {\n",
"        \"messages\": [\n",
"            HumanMessage(\n",
"                content=\"Translate this sentence from English to French: I love programming.\"\n",
"            (\n",
"                \"human\",\n",
"                \"Translate this sentence from English to French: I love programming.\",\n",
"            ),\n",
"            AIMessage(content=\"J'adore la programmation.\"),\n",
"            HumanMessage(content=\"What did you just say?\"),\n",
"            (\"ai\", \"J'adore la programmation.\"),\n",
"            (\"human\", \"What did you just say?\"),\n",
"        ],\n",
"    }\n",
")"
")\n",
"print(ai_msg.content)"
]
},
{
@@ -193,7 +191,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.')"
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
]
},
"execution_count": 5,
@@ -250,7 +248,7 @@
"            \"system\",\n",
"            \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
"        ),\n",
"        MessagesPlaceholder(variable_name=\"chat_history\"),\n",
"        (\"placeholder\", \"{chat_history}\"),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
@@ -304,10 +302,17 @@
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='The translation of \"I love programming\" in French is \"J\\'adore la programmation.\"')"
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
]
},
"execution_count": 8,
@@ -327,10 +332,17 @@
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.')"
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
]
},
"execution_count": 9,
@@ -354,12 +366,12 @@
"\n",
"### Trimming messages\n",
"\n",
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is to only load and store the most recent `n` messages. Let's use an example history with some preloaded messages:"
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is to trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 21,
"metadata": {},
"outputs": [
{
@@ -371,7 +383,7 @@
"  AIMessage(content='Fine thanks!')]"
]
},
"execution_count": 10,
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
@@ -396,34 +408,28 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='Your name is Nemo.')"
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
]
},
"execution_count": 11,
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
"        ),\n",
"        MessagesPlaceholder(variable_name=\"chat_history\"),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | chat\n",
"\n",
"chain_with_message_history = RunnableWithMessageHistory(\n",
"    chain,\n",
"    lambda session_id: demo_ephemeral_chat_history,\n",
@@ -443,34 +449,33 @@
"source": [
"We can see the chain remembers the preloaded name.\n",
"\n",
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the `clear` method to remove messages and re-add them to the history. We don't have to, but let's put this method at the front of our chain to ensure it's always called:"
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:"
]
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.messages import trim_messages\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"\n",
"def trim_messages(chain_input):\n",
"    stored_messages = demo_ephemeral_chat_history.messages\n",
"    if len(stored_messages) <= 2:\n",
"        return False\n",
"\n",
"    demo_ephemeral_chat_history.clear()\n",
"\n",
"    for message in stored_messages[-2:]:\n",
"        demo_ephemeral_chat_history.add_message(message)\n",
"\n",
"    return True\n",
"\n",
"trimmer = trim_messages(strategy=\"last\", max_tokens=2, token_counter=len)\n",
"\n",
"chain_with_trimming = (\n",
"    RunnablePassthrough.assign(messages_trimmed=trim_messages)\n",
"    | chain_with_message_history\n",
"    RunnablePassthrough.assign(chat_history=itemgetter(\"chat_history\") | trimmer)\n",
"    | prompt\n",
"    | chat\n",
")\n",
"\n",
"chain_with_trimmed_history = RunnableWithMessageHistory(\n",
"    chain_with_trimming,\n",
"    lambda session_id: demo_ephemeral_chat_history,\n",
"    input_messages_key=\"input\",\n",
"    history_messages_key=\"chat_history\",\n",
")"
]
},
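The replacement above leans on the `trim_messages` helper. A minimal hedged sketch of it in isolation (using `token_counter=len` so each message counts as one "token", as the cell does; the sample messages are assumed, mirroring the preloaded history):

```python
from langchain_core.messages import AIMessage, HumanMessage, trim_messages

messages = [
    HumanMessage(content="Hey there! I'm Nemo."),
    AIMessage(content="Hello!"),
    HumanMessage(content="How are you today?"),
    AIMessage(content="Fine thanks!"),
]

# strategy="last" keeps the most recent messages; with token_counter=len,
# max_tokens=2 means "keep the last two messages". Called without messages,
# trim_messages returns a runnable that can be chained or invoked directly.
trimmer = trim_messages(strategy="last", max_tokens=2, token_counter=len)
print(trimmer.invoke(messages))
# [HumanMessage(content='How are you today?'), AIMessage(content='Fine thanks!')]
```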
@@ -483,22 +488,29 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 24,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")"
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
]
},
"execution_count": 13,
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain_with_trimming.invoke(\n",
"chain_with_trimmed_history.invoke(\n",
"    {\"input\": \"Where does P. Sherman live?\"},\n",
"    {\"configurable\": {\"session_id\": \"unused\"}},\n",
")"
@@ -506,19 +518,23 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 25,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content=\"What's my name?\"),\n",
"  AIMessage(content='Your name is Nemo.'),\n",
"[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
"  AIMessage(content='Hello!'),\n",
"  HumanMessage(content='How are you today?'),\n",
"  AIMessage(content='Fine thanks!'),\n",
"  HumanMessage(content=\"What's my name?\"),\n",
"  AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
"  HumanMessage(content='Where does P. Sherman live?'),\n",
"  AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")]"
"  AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
]
},
"execution_count": 14,
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
@@ -536,48 +552,39 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 27,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. Treating as a root run.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")"
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
]
},
"execution_count": 15,
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain_with_trimming.invoke(\n",
"chain_with_trimmed_history.invoke(\n",
"    {\"input\": \"What is my name?\"},\n",
"    {\"configurable\": {\"session_id\": \"unused\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"cell_type": "markdown",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='Where does P. Sherman live?'),\n",
"  AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\"),\n",
"  HumanMessage(content='What is my name?'),\n",
"  AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")]"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"demo_ephemeral_chat_history.messages"
"Check out our [how to guide on trimming messages](/docs/how_to/trim_messages/) for more."
]
},
{
@@ -638,7 +645,7 @@
"            \"system\",\n",
"            \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\",\n",
"        ),\n",
"        MessagesPlaceholder(variable_name=\"chat_history\"),\n",
"        (\"placeholder\", \"{chat_history}\"),\n",
"        (\"user\", \"{input}\"),\n",
"    ]\n",
")\n",
@@ -672,7 +679,7 @@
"        return False\n",
"    summarization_prompt = ChatPromptTemplate.from_messages(\n",
"        [\n",
"            MessagesPlaceholder(variable_name=\"chat_history\"),\n",
"            (\"placeholder\", \"{chat_history}\"),\n",
"            (\n",
"                \"user\",\n",
"                \"Distill the above chat messages into a single summary message. Include as many specific details as you can.\",\n",
@@ -772,9 +779,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
File diff suppressed because one or more lines are too long
@@ -300,7 +300,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 2,
|
||||
"id": "ac9295d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -312,10 +312,8 @@
|
||||
"\n",
|
||||
"## Quick Install\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"# Hopefully this code block isn't split\n",
|
||||
"pip install langchain\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"As an open-source project in a rapidly developing field, we are extremely open to contributions.\n",
|
||||
"\"\"\""
|
||||
@@ -323,7 +321,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"id": "3a0cb17a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -332,15 +330,14 @@
|
||||
"text/plain": [
|
||||
"[Document(page_content='# 🦜️🔗 LangChain'),\n",
|
||||
" Document(page_content='⚡ Building applications with LLMs through composability ⚡'),\n",
|
||||
" Document(page_content='## Quick Install\\n\\n```bash'),\n",
|
||||
" Document(page_content='## Quick Install'),\n",
|
||||
" Document(page_content=\"# Hopefully this code block isn't split\"),\n",
|
||||
" Document(page_content='pip install langchain'),\n",
|
||||
" Document(page_content='```'),\n",
|
||||
" Document(page_content='As an open-source project in a rapidly developing field, we'),\n",
|
||||
" Document(page_content='are extremely open to contributions.')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -742,7 +739,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -48,20 +48,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "40ed76a2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
|
||||
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
|
||||
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai\n",
|
||||
"\n",
|
||||
|
||||
@@ -220,6 +220,57 @@
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14002ec8-7ee5-4f91-9315-dd21c3808776",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### `LLMListwiseRerank`\n",
|
||||
"\n",
|
||||
"[LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
|
||||
"\n",
|
||||
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "4ab9ee9f-917e-4d6f-9344-eb7f01533228",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 1:\n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"\n",
|
||||
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=_filter, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7194da42",
|
||||
@@ -295,7 +346,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 8,
|
||||
"id": "617a1756",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
||||
549
docs/docs/how_to/convert_runnable_to_tool.ipynb
Normal file
@@ -0,0 +1,549 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to convert Runnables as Tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Runnables](/docs/concepts#runnable-interface)\n",
|
||||
"- [Tools](/docs/concepts#tools)\n",
|
||||
"- [Agents](/docs/tutorials/agents)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Here we will demonstrate how to convert a LangChain `Runnable` into a tool that can be used by agents, chains, or chat models.\n",
|
||||
"\n",
|
||||
"## Dependencies\n",
|
||||
"\n",
|
||||
"**Note**: this guide requires `langchain-core` >= 0.2.13. We will also use [OpenAI](/docs/integrations/platforms/openai/) for embeddings, but any LangChain embeddings should suffice. We will use a simple [LangGraph](https://langchain-ai.github.io/langgraph/) agent for demonstration purposes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "92341f48-2c29-4ce9-8ab8-0a7c7a7c98a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install -U langchain-core langchain-openai langgraph"
|
||||
]
|
||||
},
|
||||
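A quick sanity check can confirm the version requirement noted above; this is a minimal sketch (not part of the original notebook), assuming `langchain-core` is already installed:

```python
# Verify the installed langchain-core version meets the >= 0.2.13 requirement.
import langchain_core

print(langchain_core.__version__)
```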
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b0dcc1a-48e8-4a81-b920-3563192ce076",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"LangChain [tools](/docs/concepts#tools) are interfaces that an agent, chain, or chat model can use to interact with the world. See [here](/docs/how_to/#tools) for how-to guides covering tool-calling, built-in tools, custom tools, and more information.\n",
|
||||
"\n",
|
||||
"LangChain tools-- instances of [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
|
||||
"\n",
|
||||
"- Their inputs are constrained to be serializable, specifically strings and Python `dict` objects;\n",
|
||||
"- They contain names and descriptions indicating how and when they should be used;\n",
|
||||
"- They may contain a detailed [args_schema](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
|
||||
"\n",
|
||||
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4d76680-1b6b-4862-8c4f-22766a1d41f2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic usage\n",
|
||||
"\n",
|
||||
"With typed `dict` input:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b2cc4231-64a3-4733-a284-932dcbf2fcc3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from typing_extensions import TypedDict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Args(TypedDict):\n",
|
||||
" a: int\n",
|
||||
" b: List[int]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def f(x: Args) -> str:\n",
|
||||
" return str(x[\"a\"] * max(x[\"b\"]))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(f)\n",
|
||||
"as_tool = runnable.as_tool(\n",
|
||||
" name=\"My tool\",\n",
|
||||
" description=\"Explanation of when to use tool.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "57f2d435-624d-459a-903d-8509fbbde610",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Explanation of when to use tool.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'My tool',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'a': {'title': 'A', 'type': 'integer'},\n",
|
||||
" 'b': {'title': 'B', 'type': 'array', 'items': {'type': 'integer'}}},\n",
|
||||
" 'required': ['a', 'b']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(as_tool.description)\n",
|
||||
"\n",
|
||||
"as_tool.args_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "54ae7384-a03d-4fa4-8cdf-9604a4bc39ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'6'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"as_tool.invoke({\"a\": 3, \"b\": [1, 2]})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9038f587-4613-4f50-b349-135f9e7e3b15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Without typing information, arg types can be specified via `arg_types`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "169f733c-4936-497f-8577-ee769dc16b88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, Dict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def g(x: Dict[str, Any]) -> str:\n",
|
||||
" return str(x[\"a\"] * max(x[\"b\"]))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(g)\n",
|
||||
"as_tool = runnable.as_tool(\n",
|
||||
" name=\"My tool\",\n",
|
||||
" description=\"Explanation of when to use tool.\",\n",
|
||||
" arg_types={\"a\": int, \"b\": List[int]},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "eb102705-89b7-48dc-9158-d36d5f98ae8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GSchema(BaseModel):\n",
|
||||
" \"\"\"Apply a function to an integer and list of integers.\"\"\"\n",
|
||||
"\n",
|
||||
" a: int = Field(..., description=\"Integer\")\n",
|
||||
" b: List[int] = Field(..., description=\"List of ints\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(g)\n",
|
||||
"as_tool = runnable.as_tool(GSchema)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7c474d85-4e01-4fae-9bba-0c6c8c26475c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"String input is also supported:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c475282a-58d6-4c2b-af7d-99b73b7d8a13",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def f(x: str) -> str:\n",
|
||||
" return x + \"a\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def g(x: str) -> str:\n",
|
||||
" return x + \"z\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(f) | g\n",
|
||||
"as_tool = runnable.as_tool()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ad6d8d96-3a87-40bd-a2ac-44a8acde0a8e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'baz'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"as_tool.invoke(\"b\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "89fdb3a7-d228-48f0-8f73-262af4febb58",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## In agents\n",
|
||||
"\n",
|
||||
"Below we will incorporate LangChain Runnables as tools in an [agent](/docs/concepts/#agents) application. We will demonstrate with:\n",
|
||||
"\n",
|
||||
"- a document [retriever](/docs/concepts/#retrievers);\n",
|
||||
"- a simple [RAG](/docs/tutorials/rag/) chain, allowing an agent to delegate relevant queries to it.\n",
|
||||
"\n",
|
||||
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
"/>\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "d06c9f2a-4475-450f-9106-54db1d99623b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e8a2038a-d762-4196-b5e3-fdb89c11e71d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Following the [RAG tutorial](/docs/tutorials/rag/), let's first construct a retriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "23d2a47e-6712-4294-81c8-2c1d76b4bb81",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"documents = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Dogs are great companions, known for their loyalty and friendliness.\",\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Cats are independent pets that often enjoy their own space.\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_documents(\n",
|
||||
" documents, embedding=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"retriever = vectorstore.as_retriever(\n",
|
||||
" search_type=\"similarity\",\n",
|
||||
" search_kwargs={\"k\": 1},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/v0.2/docs/tutorials/agents/) and provide it the tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"tools = [\n",
|
||||
" retriever.as_tool(\n",
|
||||
" name=\"pet_info_retriever\",\n",
|
||||
" description=\"Get information about pets.\",\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"agent = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "be29437b-a187-4a0a-9a5d-419c56f2434e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
|
||||
"----\n",
|
||||
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in agent.stream({\"messages\": [(\"human\", \"What are dogs known for?\")]}):\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"----\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [LangSmith trace](https://smith.langchain.com/public/44e438e3-2faf-45bd-b397-5510fc145eb9/r) for the above run."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a722fd8a-b957-4ba7-b408-35596b76835f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Going further, we can create a simple [RAG](/docs/tutorials/rag/) chain that takes an additional parameter-- here, the \"style\" of the answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "bea518c9-c711-47c2-b8cc-dbd102f71f09",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"system_prompt = \"\"\"\n",
|
||||
"You are an assistant for question-answering tasks.\n",
|
||||
"Use the below context to answer the question. If\n",
|
||||
"you don't know the answer, say you don't know.\n",
|
||||
"Use three sentences maximum and keep the answer\n",
|
||||
"concise.\n",
|
||||
"\n",
|
||||
"Answer in the style of {answer_style}.\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\n",
|
||||
"Context: {context}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system_prompt)])\n",
|
||||
"\n",
|
||||
"rag_chain = (\n",
|
||||
" {\n",
|
||||
" \"context\": itemgetter(\"question\") | retriever,\n",
|
||||
" \"question\": itemgetter(\"question\"),\n",
|
||||
" \"answer_style\": itemgetter(\"answer_style\"),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "955a23db-5218-4c34-8486-450a2ddb3443",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the input schema for our chain contains the required arguments, so it converts to a tool without further specification:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "2c9f6e61-80ed-4abb-8e77-84de3ccbc891",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'RunnableParallel<context,question,answer_style>Input',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'question': {'title': 'Question'},\n",
|
||||
" 'answer_style': {'title': 'Answer Style'}}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rag_chain.input_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "a3f9cf5b-8c71-4b0f-902b-f92e028780c9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"rag_tool = rag_chain.as_tool(\n",
|
||||
" name=\"pet_expert\",\n",
|
||||
" description=\"Get information about pets.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4570615b-8f96-4d97-ae01-1c08b14be584",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Below we again invoke the agent. Note that the agent populates the required parameters in its `tool_calls`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "06409913-a2ad-400f-a202-7b8dd2ef483a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
|
||||
"----\n",
|
||||
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent = create_react_agent(llm, [rag_tool])\n",
|
||||
"\n",
|
||||
"for chunk in agent.stream(\n",
|
||||
" {\"messages\": [(\"human\", \"What would a pirate say dogs are known for?\")]}\n",
|
||||
"):\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"----\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96cc9bc3-e79e-49a8-9915-428ea225358b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [LangSmith trace](https://smith.langchain.com/public/147ae4e6-4dfb-4dd9-8ca0-5c5b954f08ac/r) for the above run."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -131,7 +131,7 @@
|
||||
"source": [
|
||||
"## Base Chat Model\n",
|
||||
"\n",
|
||||
"Let's implement a chat model that echoes back the first `n` characetrs of the last message in the prompt!\n",
|
||||
"Let's implement a chat model that echoes back the first `n` characters of the last message in the prompt!\n",
|
||||
"\n",
|
||||
"To do so, we will inherit from `BaseChatModel` and we'll need to implement the following:\n",
|
||||
"\n",
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "5436020b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create custom tools\n",
|
||||
"# How to create tools\n",
|
||||
"\n",
|
||||
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
|
||||
"\n",
|
||||
@@ -16,13 +16,15 @@
|
||||
"| args_schema | Pydantic BaseModel | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
|
||||
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
|
||||
"\n",
|
||||
"LangChain provides 3 ways to create tools:\n",
|
||||
"LangChain supports the creation of tools from:\n",
|
||||
"\n",
|
||||
"1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n",
|
||||
"2. Using [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n",
|
||||
"1. Functions;\n",
|
||||
"2. LangChain [Runnables](/docs/concepts#runnable-interface);\n",
|
||||
"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
|
||||
"\n",
|
||||
"The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n",
|
||||
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
|
||||
"\n",
|
||||
"In this guide we provide an overview of these methods.\n",
|
||||
"\n",
|
||||
":::{.callout-tip}\n",
|
||||
"\n",
|
||||
@@ -35,7 +37,9 @@
|
||||
"id": "c7326b23",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## @tool decorator\n",
|
||||
"## Creating tools from functions\n",
|
||||
"\n",
|
||||
"### @tool decorator\n",
|
||||
"\n",
|
||||
"This `@tool` decorator is the simplest way to define a custom tool. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description - so a docstring MUST be provided. "
|
||||
]
|
||||
@@ -51,7 +55,7 @@
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"multiply\n",
|
||||
"multiply(a: int, b: int) -> int - Multiply two numbers.\n",
|
||||
"Multiply two numbers.\n",
|
||||
"{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n"
|
||||
]
|
||||
}
|
||||
@@ -96,6 +100,57 @@
|
||||
" return a * b"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f0edc51-c586-414c-8941-c8abe779943f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that `@tool` supports parsing of annotations, nested schemas, and other features:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5626423f-053e-4a66-adca-1d794d835397",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'multiply_by_maxSchema',\n",
|
||||
" 'description': 'Multiply a by the maximum of b.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'a': {'title': 'A',\n",
|
||||
" 'description': 'scale factor',\n",
|
||||
" 'type': 'string'},\n",
|
||||
" 'b': {'title': 'B',\n",
|
||||
" 'description': 'list of ints over which to take maximum',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'integer'}}},\n",
|
||||
" 'required': ['a', 'b']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Annotated, List\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply_by_max(\n",
|
||||
" a: Annotated[str, \"scale factor\"],\n",
|
||||
" b: Annotated[List[int], \"list of ints over which to take maximum\"],\n",
|
||||
") -> int:\n",
|
||||
" \"\"\"Multiply a by the maximum of b.\"\"\"\n",
|
||||
" return a * max(b)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"multiply_by_max.args_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98d6eee9",
|
||||
@@ -106,7 +161,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -115,7 +170,7 @@
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"multiplication-tool\n",
|
||||
"multiplication-tool(a: int, b: int) -> int - Multiply two numbers.\n",
|
||||
"Multiply two numbers.\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
|
||||
"True\n"
|
||||
]
|
||||
@@ -143,19 +198,84 @@
|
||||
"print(multiply.return_direct)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "33a9e94d-0b60-48f3-a4c2-247dce096e66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Docstring parsing"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d0cb586-93d4-4ff1-9779-71df7853cb68",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`@tool` can optionally parse [Google Style docstrings](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) and associate the docstring components (such as arg descriptions) to the relevant parts of the tool schema. To toggle this behavior, specify `parse_docstring`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'fooSchema',\n",
|
||||
" 'description': 'The foo.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'bar': {'title': 'Bar',\n",
|
||||
" 'description': 'The bar.',\n",
|
||||
" 'type': 'string'},\n",
|
||||
" 'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
|
||||
" 'required': ['bar', 'baz']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def foo(bar: str, baz: int) -> str:\n",
|
||||
" \"\"\"The foo.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" bar: The bar.\n",
|
||||
" baz: The baz.\n",
|
||||
" \"\"\"\n",
|
||||
" return bar\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"foo.args_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f18a2503-5393-421b-99fa-4a01dd824d0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-caution}\n",
|
||||
"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) for detail and examples.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b63fcc3b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## StructuredTool\n",
|
||||
"### StructuredTool\n",
|
||||
"\n",
|
||||
"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 6,
|
||||
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -198,7 +318,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -208,7 +328,7 @@
|
||||
"text": [
|
||||
"6\n",
|
||||
"Calculator\n",
|
||||
"Calculator(a: int, b: int) -> int - multiply numbers\n",
|
||||
"multiply numbers\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
|
||||
]
|
||||
}
|
||||
@@ -239,6 +359,63 @@
|
||||
"print(calculator.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5517995d-54e3-449b-8fdb-03561f5e4647",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating tools from Runnables\n",
|
||||
"\n",
|
||||
"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
|
||||
"\n",
|
||||
"Example usage:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.language_models import GenericFakeChatModel\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"human\", \"Hello. Please respond in the style of {answer_style}.\")]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Placeholder LLM\n",
|
||||
"llm = GenericFakeChatModel(messages=iter([\"hello matey\"]))\n",
|
||||
"\n",
|
||||
"chain = prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"as_tool = chain.as_tool(\n",
|
||||
" name=\"Style responder\", description=\"Description of when to use tool.\"\n",
|
||||
")\n",
|
||||
"as_tool.args"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0521b787-a146-45a6-8ace-ae1ac4669dd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [this guide](/docs/how_to/convert_runnable_to_tool) for more detail."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b840074b-9c10-4ca0-aed8-626c52b2398f",
|
||||
@@ -251,7 +428,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"execution_count": 10,
|
||||
"id": "1dad8f8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -300,7 +477,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 11,
|
||||
"id": "bb551c33",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -351,7 +528,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 12,
|
||||
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -383,7 +560,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 13,
|
||||
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -428,7 +605,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 14,
|
||||
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -473,7 +650,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 15,
|
||||
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -496,7 +673,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 16,
|
||||
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -506,7 +683,7 @@
|
||||
"'Error: There is no city by the name of foobar.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -530,7 +707,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 17,
|
||||
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -540,7 +717,7 @@
|
||||
"\"There is no such city, but it's probably above 0K there!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -564,7 +741,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 18,
|
||||
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -574,7 +751,7 @@
|
||||
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -591,13 +768,189 @@
|
||||
"\n",
|
||||
"get_weather_tool.invoke({\"city\": \"foobar\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1a8d8383-11b3-445e-956f-df4e96995e00",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Returning artifacts of Tool execution\n",
|
||||
"\n",
|
||||
"Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
|
||||
"\n",
|
||||
"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-core >= 0.2.19``\n",
|
||||
"\n",
|
||||
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format=\"content_and_artifact\"` when defining our tool and make sure that we return a tuple of (content, artifact):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "14905425-0334-43a0-9de9-5bcf622ede0e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"from typing import List, Tuple\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(response_format=\"content_and_artifact\")\n",
|
||||
"def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:\n",
|
||||
" \"\"\"Generate size random ints in the range [min, max].\"\"\"\n",
|
||||
" array = [random.randint(min, max) for _ in range(size)]\n",
|
||||
" content = f\"Successfully generated array of {size} random ints in [{min}, {max}].\"\n",
|
||||
" return content, array"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49f057a6-8938-43ea-8faf-ae41e797ceb8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we invoke our tool directly with the tool arguments, we'll get back just the content part of the output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0f2e1528-404b-46e6-b87c-f0957c4b9217",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Successfully generated array of 10 random ints in [0, 9].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke({\"min\": 0, \"max\": 9, \"size\": 10})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e62ebba-1737-4b97-b61a-7313ade4e8c2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we invoke our tool with a ToolCall (like the ones generated by tool-calling models), we'll get back a ToolMessage that contains both the content and artifact generated by the Tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cc197777-26eb-46b3-a83b-c2ce116c6311",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[1, 4, 2, 5, 3, 9, 0, 4, 7, 7])"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_ints\",\n",
|
||||
" \"args\": {\"min\": 0, \"max\": 9, \"size\": 10},\n",
|
||||
" \"id\": \"123\", # required\n",
|
||||
" \"type\": \"tool_call\", # required\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dfdc1040-bf25-4790-b4c3-59452db84e11",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can do the same when subclassing BaseTool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "fe1a09d1-378b-4b91-bb5e-0697c3d7eb92",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GenerateRandomFloats(BaseTool):\n",
|
||||
" name: str = \"generate_random_floats\"\n",
|
||||
" description: str = \"Generate size random floats in the range [min, max].\"\n",
|
||||
" response_format: str = \"content_and_artifact\"\n",
|
||||
"\n",
|
||||
" ndigits: int = 2\n",
|
||||
"\n",
|
||||
" def _run(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" range_ = max - min\n",
|
||||
" array = [\n",
|
||||
" round(min + (range_ * random.random()), ndigits=self.ndigits)\n",
|
||||
" for _ in range(size)\n",
|
||||
" ]\n",
|
||||
" content = f\"Generated {size} floats in [{min}, {max}], rounded to {self.ndigits} decimals.\"\n",
|
||||
" return content, array\n",
|
||||
"\n",
|
||||
" # Optionally define an equivalent async method\n",
|
||||
"\n",
|
||||
" # async def _arun(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" # ..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "8c3d16f6-1c4a-48ab-b05a-38547c592e79",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.4277, 0.7578, 2.4871])"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rand_gen = GenerateRandomFloats(ndigits=4)\n",
|
||||
"\n",
|
||||
"rand_gen.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_floats\",\n",
|
||||
" \"args\": {\"min\": 0.1, \"max\": 3.3333, \"size\": 3},\n",
|
||||
" \"id\": \"123\",\n",
|
||||
" \"type\": \"tool_call\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -609,7 +962,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -23,12 +23,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install \"unstructured[html]\""
|
||||
"%pip install unstructured"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"id": "7d167ca3-c7c7-4ef0-b509-080629f0f482",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -36,14 +36,14 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='My First Heading\\n\\nMy first paragraph.', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html'})]\n"
|
||||
"[Document(page_content='My First Heading\\n\\nMy first paragraph.', metadata={'source': '../../docs/integrations/document_loaders/example_data/fake-content.html'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import UnstructuredHTMLLoader\n",
|
||||
"\n",
|
||||
"file_path = \"../../../docs/integrations/document_loaders/example_data/fake-content.html\"\n",
|
||||
"file_path = \"../../docs/integrations/document_loaders/example_data/fake-content.html\"\n",
|
||||
"\n",
|
||||
"loader = UnstructuredHTMLLoader(file_path)\n",
|
||||
"data = loader.load()\n",
|
||||
@@ -73,7 +73,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"id": "0a2050a8-6df6-4696-9889-ba367d6f9caa",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -81,7 +81,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', metadata={'source': '../../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})]\n"
|
||||
"[Document(page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n', metadata={'source': '../../docs/integrations/document_loaders/example_data/fake-content.html', 'title': 'Test Title'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -111,7 +111,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -21,12 +21,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": null,
|
||||
"id": "c8b147fb-6877-4f7a-b2ee-ee971c7bc662",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# !pip install \"unstructured[md]\""
|
||||
"%pip install \"unstructured[md]\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -39,7 +39,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "80c50cc4-7ce9-4418-81b9-29c52c7b3627",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -62,7 +62,7 @@
|
||||
"from langchain_community.document_loaders import UnstructuredMarkdownLoader\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"markdown_path = \"../../../../README.md\"\n",
|
||||
"markdown_path = \"../../../README.md\"\n",
|
||||
"loader = UnstructuredMarkdownLoader(markdown_path)\n",
|
||||
"\n",
|
||||
"data = loader.load()\n",
|
||||
@@ -84,7 +84,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"id": "a986bbce-7fd3-41d1-bc47-49f9f57c7cd1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -92,11 +92,11 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of documents: 65\n",
|
||||
"Number of documents: 66\n",
|
||||
"\n",
|
||||
"page_content='🦜️🔗 LangChain' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'Title'}\n",
|
||||
"page_content='🦜️🔗 LangChain' metadata={'source': '../../../README.md', 'category_depth': 0, 'last_modified': '2024-06-28T15:20:01', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': '../../..', 'filename': 'README.md', 'category': 'Title'}\n",
|
||||
"\n",
|
||||
"page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../../README.md', 'last_modified': '2024-04-29T13:40:19', 'page_number': 1, 'languages': ['eng'], 'parent_id': 'c3223b6f7100be08a78f1e8c0c28fde1', 'filetype': 'text/markdown', 'file_directory': '../../../..', 'filename': 'README.md', 'category': 'NarrativeText'}\n",
|
||||
"page_content='⚡ Build context-aware reasoning applications ⚡' metadata={'source': '../../../README.md', 'last_modified': '2024-06-28T15:20:01', 'languages': ['eng'], 'parent_id': '200b8a7d0dd03f66e4f13456566d2b3a', 'filetype': 'text/markdown', 'file_directory': '../../..', 'filename': 'README.md', 'category': 'NarrativeText'}\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
@@ -121,7 +121,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"id": "75abc139-3ded-4e8e-9f21-d0c8ec40fdfc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -129,13 +129,21 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'Title', 'NarrativeText', 'ListItem'}\n"
|
||||
"{'ListItem', 'NarrativeText', 'Title'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(set(document.metadata[\"category\"] for document in data))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "223b4c11",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -154,7 +162,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -58,6 +58,8 @@
}
],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
@@ -86,7 +88,7 @@
" # NOTE: This is returning another Runnable, not an actual output.\n",
" return contextualize_question\n",
" else:\n",
" return RunnablePassthrough()\n",
" return RunnablePassthrough() | itemgetter(\"question\")\n",
"\n",
"\n",
"@chain\n",

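A minimal sketch of the pattern this hunk edits: a `@chain`-decorated function that returns another Runnable, which is then invoked with the same input. Here `contextualize_question` stands in for the runnable defined earlier in that notebook:

```python
from operator import itemgetter

from langchain_core.runnables import RunnablePassthrough, chain


@chain
def contextualize_if_needed(input_: dict):
    if input_.get("chat_history"):
        # NOTE: This is returning another Runnable, not an actual output.
        return contextualize_question
    else:
        # Otherwise just pass the question through unchanged.
        return RunnablePassthrough() | itemgetter("question")
```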
@@ -67,15 +67,16 @@ If you'd prefer not to set an environment variable you can pass the key in direc
```python
from langchain_cohere import CohereEmbeddings

embeddings_model = CohereEmbeddings(cohere_api_key="...")
embeddings_model = CohereEmbeddings(cohere_api_key="...", model='embed-english-v3.0')
```

Otherwise you can initialize without any params:
Otherwise you can initialize simply as shown below:
```python
from langchain_cohere import CohereEmbeddings

embeddings_model = CohereEmbeddings()
embeddings_model = CohereEmbeddings(model='embed-english-v3.0')
```
Note that it is mandatory to pass the `model` parameter when initializing the `CohereEmbeddings` class.
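For example, a quick usage check (a sketch; the query text is arbitrary):

```python
from langchain_cohere import CohereEmbeddings

embeddings_model = CohereEmbeddings(model='embed-english-v3.0')
vector = embeddings_model.embed_query("Hello, world!")  # returns a list of floats
```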

</TabItem>
<TabItem value="huggingface" label="Hugging Face">

@@ -246,11 +246,11 @@
"examples = [\n",
" (\n",
" \"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\",\n",
" Person(name=None, height_in_meters=None, hair_color=None),\n",
" Data(people=[]),\n",
" ),\n",
" (\n",
" \"Fiona traveled far from France to Spain.\",\n",
" Person(name=\"Fiona\", height_in_meters=None, hair_color=None),\n",
" Data(people=[Person(name=\"Fiona\", height_in_meters=None, hair_color=None)]),\n",
" ),\n",
"]\n",
"\n",

@@ -23,7 +23,7 @@
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Example selectors](/docs/concepts/#example-selectors)\n",
"- [LLMs](/docs/concepts/#llms)\n",
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
"- [Vectorstores](/docs/concepts/#vector-stores)\n",
"\n",
":::\n",
"\n",

@@ -23,7 +23,7 @@
"- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"- [Example selectors](/docs/concepts/#example-selectors)\n",
"- [Chat models](/docs/concepts/#chat-model)\n",
"- [Vectorstores](/docs/concepts/#vectorstores)\n",
"- [Vectorstores](/docs/concepts/#vector-stores)\n",
"\n",
":::\n",
"\n",
@@ -51,7 +51,7 @@
"- `examples`: A list of dictionary examples to include in the final prompt.\n",
"- `example_prompt`: converts each example into 1 or more messages through its [`format_messages`](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html?highlight=format_messages#langchain_core.prompts.chat.ChatPromptTemplate.format_messages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
"\n",
"Below is a simple demonstration. First, define the examples you'd like to include:"
"Below is a simple demonstration. First, define the examples you'd like to include. Let's give the LLM an unfamiliar mathematical operator, denoted by the \"🦜\" emoji:"
]
},
{
@@ -59,17 +59,7 @@
"execution_count": 1,
"id": "5b79e400",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"%pip install -qU langchain langchain-openai langchain-chroma\n",
"\n",
@@ -79,9 +69,50 @@
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "30856d92",
"metadata": {},
"source": [
"If we try to ask the model what the result of this expression is, it will fail:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "174dec5b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='The expression \"2 🦜 9\" is not a standard mathematical operation or equation. It appears to be a combination of the number 2 and the parrot emoji 🦜 followed by the number 9. It does not have a specific mathematical meaning.', response_metadata={'token_usage': {'completion_tokens': 54, 'prompt_tokens': 17, 'total_tokens': 71}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-aad12dda-5c47-4a1e-9949-6fe94e03242a-0', usage_metadata={'input_tokens': 17, 'output_tokens': 54, 'total_tokens': 71})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"\n",
"model.invoke(\"What is 2 🦜 9?\")"
]
},
{
"cell_type": "markdown",
"id": "e6d58385",
"metadata": {},
"source": [
"Now let's see what happens if we give the LLM some examples to work with. We'll define some below:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "0fc5a02a-6249-4e92-95c3-30fff9671e8b",
"metadata": {
"tags": []
@@ -91,8 +122,8 @@
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
"\n",
"examples = [\n",
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
" {\"input\": \"2+3\", \"output\": \"5\"},\n",
" {\"input\": \"2 🦜 2\", \"output\": \"4\"},\n",
" {\"input\": \"2 🦜 3\", \"output\": \"5\"},\n",
"]"
]
},
@@ -106,7 +137,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 6,
"id": "65e72ad1-9060-47d0-91a1-bc130c8b98ac",
"metadata": {
"tags": []
@@ -116,7 +147,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[HumanMessage(content='2+2'), AIMessage(content='4'), HumanMessage(content='2+3'), AIMessage(content='5')]\n"
"[HumanMessage(content='2 🦜 2'), AIMessage(content='4'), HumanMessage(content='2 🦜 3'), AIMessage(content='5')]\n"
]
}
],
@@ -146,7 +177,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 7,
"id": "9f86d6d9-50de-41b6-b6c7-0f9980cc0187",
"metadata": {
"tags": []
@@ -162,9 +193,17 @@
")"
]
},
{
"cell_type": "markdown",
"id": "dd8029c5",
"metadata": {},
"source": [
"And now let's ask the model the initial question and see how it does:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 8,
"id": "97d443b1-6fae-4b36-bede-3ff7306288a3",
"metadata": {
"tags": []
@@ -173,10 +212,10 @@
{
"data": {
"text/plain": [
"AIMessage(content='A triangle does not have a square. The square of a number is the result of multiplying the number by itself.', response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 52, 'total_tokens': 75}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-3456c4ef-7b4d-4adb-9e02-8079de82a47a-0')"
"AIMessage(content='11', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5ec4e051-262f-408e-ad00-3f2ebeb561c3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
]
},
"execution_count": 5,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -184,9 +223,9 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"chain = final_prompt | model\n",
"\n",
"chain.invoke({\"input\": \"What's the square of a triangle?\"})"
"chain.invoke({\"input\": \"What is 2 🦜 9?\"})"
]
},
{
@@ -194,6 +233,8 @@
"id": "70ab7114-f07f-46be-8874-3705a25aba5f",
"metadata": {},
"source": [
"And we can see that the model has now inferred that the parrot emoji means addition from the given few-shot examples!\n",
"\n",
"## Dynamic few-shot prompting\n",
"\n",
"Sometimes you may want to select only a few examples from your overall set to show based on the input. For this, you can replace the `examples` passed into `FewShotChatMessagePromptTemplate` with an `example_selector`. The other components remain the same as above! Our dynamic few-shot prompt template would look like:\n",
@@ -208,7 +249,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 9,
"id": "ad66f06a-66fd-4fcc-8166-5d0e3c801e57",
"metadata": {
"tags": []
@@ -220,9 +261,9 @@
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"examples = [\n",
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
" {\"input\": \"2+3\", \"output\": \"5\"},\n",
" {\"input\": \"2+4\", \"output\": \"6\"},\n",
" {\"input\": \"2 🦜 2\", \"output\": \"4\"},\n",
" {\"input\": \"2 🦜 3\", \"output\": \"5\"},\n",
" {\"input\": \"2 🦜 4\", \"output\": \"6\"},\n",
" {\"input\": \"What did the cow say to the moon?\", \"output\": \"nothing at all\"},\n",
" {\n",
" \"input\": \"Write me a poem about the moon\",\n",
@@ -247,7 +288,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 10,
"id": "7790303a-f722-452e-8921-b14bdf20bdff",
"metadata": {
"tags": []
@@ -257,10 +298,10 @@
"data": {
"text/plain": [
"[{'input': 'What did the cow say to the moon?', 'output': 'nothing at all'},\n",
" {'input': '2+4', 'output': '6'}]"
" {'input': '2 🦜 4', 'output': '6'}]"
]
},
"execution_count": 7,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -287,7 +328,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 11,
"id": "253c255e-41d7-45f6-9d88-c7a0ced4b1bd",
"metadata": {
"tags": []
@@ -297,7 +338,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[HumanMessage(content='2+3'), AIMessage(content='5'), HumanMessage(content='2+2'), AIMessage(content='4')]\n"
"[HumanMessage(content='2 🦜 3'), AIMessage(content='5'), HumanMessage(content='2 🦜 4'), AIMessage(content='6')]\n"
]
}
],
@@ -317,7 +358,7 @@
" ),\n",
")\n",
"\n",
"print(few_shot_prompt.invoke(input=\"What's 3+3?\").to_messages())"
"print(few_shot_prompt.invoke(input=\"What's 3 🦜 3?\").to_messages())"
]
},
{
@@ -330,7 +371,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 12,
"id": "e731cb45-f0ea-422c-be37-42af2a6cb2c4",
"metadata": {
"tags": []
@@ -340,7 +381,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"messages=[HumanMessage(content='2+3'), AIMessage(content='5'), HumanMessage(content='2+2'), AIMessage(content='4')]\n"
"messages=[HumanMessage(content='2 🦜 3'), AIMessage(content='5'), HumanMessage(content='2 🦜 4'), AIMessage(content='6')]\n"
]
}
],
@@ -353,7 +394,7 @@
" ]\n",
")\n",
"\n",
"print(few_shot_prompt.invoke(input=\"What's 3+3?\"))"
"print(few_shot_prompt.invoke(input=\"What's 3 🦜 3?\"))"
]
},
{
@@ -368,7 +409,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 13,
"id": "0568cbc6-5354-47f1-ab4d-dfcc616cf583",
"metadata": {
"tags": []
@@ -377,10 +418,10 @@
{
"data": {
"text/plain": [
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 51, 'total_tokens': 52}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'stop', 'logprobs': None}, id='run-6bcbe158-a8e3-4a85-a754-1ba274a9f147-0')"
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d1863e5e-17cd-4e9d-bf7a-b9f118747a65-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
]
},
"execution_count": 18,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -388,7 +429,7 @@
"source": [
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"\n",
"chain.invoke({\"input\": \"What's 3+3?\"})"
"chain.invoke({\"input\": \"What's 3 🦜 3?\"})"
]
},
{
@@ -428,7 +469,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.5"
}
},
"nbformat": 4,

203
docs/docs/how_to/filter_messages.ipynb
Normal file
@@ -0,0 +1,203 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e389175d-8a65-4f0d-891c-dbdfabb3c3ef",
"metadata": {},
"source": [
"# How to filter messages\n",
"\n",
"In more complex chains and agents we might track state with a list of messages. This list can start to accumulate messages from multiple different models, speakers, sub-chains, etc., and we may only want to pass subsets of this full list of messages to each model call in the chain/agent.\n",
"\n",
"The `filter_messages` utility makes it easy to filter messages by type, id, or name.\n",
"\n",
"## Basic usage"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "f4ad2fd3-3cab-40d4-a989-972115865b8b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='example input', name='example_user', id='2'),\n",
" HumanMessage(content='real input', name='bob', id='4')]"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.messages import (\n",
" AIMessage,\n",
" HumanMessage,\n",
" SystemMessage,\n",
" filter_messages,\n",
")\n",
"\n",
"messages = [\n",
" SystemMessage(\"you are a good assistant\", id=\"1\"),\n",
" HumanMessage(\"example input\", id=\"2\", name=\"example_user\"),\n",
" AIMessage(\"example output\", id=\"3\", name=\"example_assistant\"),\n",
" HumanMessage(\"real input\", id=\"4\", name=\"bob\"),\n",
" AIMessage(\"real output\", id=\"5\", name=\"alice\"),\n",
"]\n",
"\n",
"filter_messages(messages, include_types=\"human\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "7b663a1e-a8ae-453e-a072-8dd75dfab460",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[SystemMessage(content='you are a good assistant', id='1'),\n",
" HumanMessage(content='real input', name='bob', id='4'),\n",
" AIMessage(content='real output', name='alice', id='5')]"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"filter_messages(messages, exclude_names=[\"example_user\", \"example_assistant\"])"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "db170e46-03f8-4710-b967-23c70c3ac054",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='example input', name='example_user', id='2'),\n",
" HumanMessage(content='real input', name='bob', id='4'),\n",
" AIMessage(content='real output', name='alice', id='5')]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"filter_messages(messages, include_types=[HumanMessage, AIMessage], exclude_ids=[\"3\"])"
]
},
{
"cell_type": "markdown",
"id": "b7c4e5ad-d1b4-4c18-b250-864adde8f0dd",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"`filter_messages` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "675f8f79-db39-401c-a582-1df2478cba30",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=[], response_metadata={'id': 'msg_01Wz7gBHahAwkZ1KCBNtXmwA', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 3}}, id='run-b5d8a3fe-004f-4502-a071-a6c025031827-0', usage_metadata={'input_tokens': 16, 'output_tokens': 3, 'total_tokens': 19})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# pip install -U langchain-anthropic\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
"# Notice we don't pass in messages. This creates\n",
"# a RunnableLambda that takes messages as input\n",
"filter_ = filter_messages(exclude_names=[\"example_user\", \"example_assistant\"])\n",
"chain = filter_ | llm\n",
"chain.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "4133ab28-f49c-480f-be92-b51eb6559153",
"metadata": {},
"source": [
"Looking at the LangSmith trace we can see that before the messages are passed to the model they are filtered: https://smith.langchain.com/public/f808a724-e072-438e-9991-657cc9e7e253/r\n",
"\n",
"Looking at just the filter_, we can see that it's a Runnable object that can be invoked like all Runnables:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c090116a-1fef-43f6-a178-7265dff9db00",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='real input', name='bob', id='4'),\n",
" AIMessage(content='real output', name='alice', id='5')]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"filter_.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "ff339066-d424-4042-8cca-cd4b007c1a8e",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For a complete description of all arguments head to the API reference: https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-2",
"language": "python",
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -300,7 +300,11 @@
"id": "922b48bd",
"metadata": {},
"source": [
"# Streaming\n",
"## Streaming\n",
"\n",
":::{.callout-note}\n",
"[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate on chunks of inputs and yield chunks of outputs), use [RunnableGenerator](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableGenerator.html) instead as in the example below.\n",
":::\n",
"\n",
"You can use generator functions (i.e. functions that use the `yield` keyword, and behave like iterators) in a chain.\n",
"\n",

@@ -9,11 +9,13 @@
"source": [
"# Hybrid Search\n",
"\n",
"The standard search in LangChain is done by vector similarity. However, a number of vectorstores implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, ...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n",
"The standard search in LangChain is done by vector similarity. However, a number of vectorstore implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, Qdrant...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n",
"\n",
"**Step 1: Make sure the vectorstore you are using supports hybrid search**\n",
"\n",
"At the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have their own way to do it. This is generally exposed as a keyword argument that is passed in during `similarity_search`. By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search, and, if so, how to use it.\n",
"At the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have its own way to do it. This is generally exposed as a keyword argument that is passed in during `similarity_search`.\n",
"\n",
"By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search, and, if so, how to use it.\n",
"\n",
"**Step 2: Add that parameter as a configurable field for the chain**\n",
"\n",
@@ -21,7 +21,7 @@ For comprehensive descriptions of every class and function see the [API Referenc
This highlights functionality that is core to using LangChain.

- [How to: return structured data from a model](/docs/how_to/structured_output/)
- [How to: use a model to call tools](/docs/how_to/tool_calling/)
- [How to: use a model to call tools](/docs/how_to/tool_calling)
- [How to: stream runnables](/docs/how_to/streaming)
- [How to: debug your LLM apps](/docs/how_to/debugging/)

@@ -43,6 +43,8 @@ This highlights functionality that is core to using LangChain.
- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/)
- [How to: inspect runnables](/docs/how_to/inspect)
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains)
- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets)

## Components

@@ -80,6 +82,20 @@ These are the core building blocks you can use when building applications.
- [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
- [How to: track response metadata across providers](/docs/how_to/response_metadata)
- [How to: let your end users choose their model](/docs/how_to/chat_models_universal_init/)
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
- [How to: stream tool calls](/docs/how_to/tool_streaming)
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
- [How to: force a specific tool call](/docs/how_to/tool_choice)
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)

### Messages

[Messages](/docs/concepts/#messages) are the input and output of chat models. They have some `content` and a `role`, which describes the source of the message.

- [How to: trim messages](/docs/how_to/trim_messages/)
- [How to: filter messages](/docs/how_to/filter_messages/)
- [How to: merge consecutive messages of the same type](/docs/how_to/merge_message_runs/)

### LLMs

@@ -168,15 +184,23 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying

### Tools

LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call).
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. Refer [here](/docs/integrations/tools/) for a list of pre-built tools.

- [How to: create custom tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
- [How to: create tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and toolkits](/docs/how_to/tools_builtin)
- [How to: use chat models to call tools](/docs/how_to/tool_calling)
- [How to: pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model)
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
- [How to: add a human-in-the-loop for tools](/docs/how_to/tools_human)
- [How to: handle tool errors](/docs/how_to/tools_error)
- [How to: force models to call a tool](/docs/how_to/tool_choice)
- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel)
- [How to: access the `RunnableConfig` from a tool](/docs/how_to/tool_configure)
- [How to: stream events from a tool](/docs/how_to/tool_stream_events)
- [How to: return artifacts from a tool](/docs/how_to/tool_artifacts/)
- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
- [How to: add ad-hoc tool calling capability to models](/docs/how_to/tools_prompting)
- [How to: pass in runtime secrets](/docs/how_to/runnable_runtime_secrets)

### Multimodal

@@ -188,7 +212,7 @@ LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to p

:::note

For in depth how-to guides for agents, please check out [LangGraph](https://github.com/langchain-ai/langgraph) documentation.
For in-depth how-to guides for agents, please check out [LangGraph](https://langchain-ai.github.io/langgraph/) documentation.

:::

@@ -204,6 +228,7 @@ For in depth how-to guides for agents, please check out [LangGraph](https://gith
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)

### Custom

@@ -216,7 +241,10 @@ All of LangChain components can easily be extended to support your own versions.
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: define a custom tool](/docs/how_to/custom_tools)
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)

### Serialization
- [How to: save and load LangChain objects](/docs/how_to/serialization)

## Use cases

@@ -251,6 +279,7 @@ For a high-level tutorial on building chatbots, check out [this guide](/docs/tut
- [How to: manage memory](/docs/how_to/chatbots_memory)
- [How to: do retrieval](/docs/how_to/chatbots_retrieval)
- [How to: use tools](/docs/how_to/chatbots_tools)
- [How to: manage large chat history](/docs/how_to/trim_messages/)

### Query analysis

@@ -295,7 +324,26 @@ You can peruse [LangGraph how-to guides here](https://langchain-ai.github.io/lan
## [LangSmith](https://docs.smith.langchain.com/)

LangSmith allows you to closely trace, monitor and evaluate your LLM application.
It seamlessly integrates with LangChain, and you can use it to inspect and debug individual steps of your chains as you build.
It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.

LangSmith documentation is hosted on a separate site.
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/).
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/), but we'll highlight a few sections that are particularly
relevant to LangChain below:

### Evaluation
<span data-heading-keywords="evaluation,evaluate"></span>

Evaluating performance is a vital part of building LLM-powered applications.
LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators.

To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation).

### Tracing
<span data-heading-keywords="trace,tracing"></span>

Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.

- [How to: trace with LangChain](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain)
- [How to: add metadata and tags to traces](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain#add-metadata-and-tags-to-traces)

You can see general tracing-related how-tos [in this section of the LangSmith docs](https://docs.smith.langchain.com/how_to_guides/tracing).

@@ -60,7 +60,7 @@
" * document addition by id (`add_documents` method with `ids` argument)\n",
" * delete by id (`delete` method with `ids` argument)\n",
"\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MongoDBAtlasVectorSearch`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
" \n",
"## Caution\n",
"\n",

@@ -2,11 +2,14 @@
sidebar_position: 2
---

# Installation
# How to install LangChain packages

The LangChain ecosystem is split into different packages, which allow you to choose exactly which pieces of
functionality to install.

## Official release

To install LangChain run:
To install the main LangChain package, run:

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
@@ -21,11 +24,24 @@ import CodeBlock from "@theme/CodeBlock";
</TabItem>
</Tabs>

This will install the bare minimum requirements of LangChain.
A lot of the value of LangChain comes when integrating it with various model providers, datastores, etc.
While this package acts as a sane starting point to using LangChain,
much of the value of LangChain comes when integrating it with various model providers, datastores, etc.
By default, the dependencies needed to do that are NOT installed. You will need to install the dependencies for specific integrations separately.
We'll show how to do that in the next sections of this guide.

## From source
## Ecosystem packages

With the exception of the `langsmith` SDK, all packages in the LangChain ecosystem depend on `langchain-core`, which contains base
classes and abstractions that other packages use. The dependency graph below shows how the different packages are related.
A directed arrow indicates that the source package depends on the target package:



When installing a package, you do not need to explicitly install that package's dependencies (such as `langchain-core`).
However, you may choose to if you are using a feature only available in a certain version of that dependency.
If you do, you should make sure that the installed or pinned version is compatible with any other integration packages you use.

### From source

If you want to install from source, you can do so by cloning the repo, making sure your directory is `PATH/TO/REPO/langchain/libs/langchain`, and running:

@@ -33,21 +49,21 @@ If you want to install from source, you can do so by cloning the repo and be sur
pip install -e .
```

## LangChain core
### LangChain core
The `langchain-core` package contains base abstractions that the rest of the LangChain ecosystem uses, along with the LangChain Expression Language. It is automatically installed by `langchain`, but can also be used separately. Install with:

```bash
pip install langchain-core
```

## LangChain community
### LangChain community
The `langchain-community` package contains third-party integrations. Install with:

```bash
pip install langchain-community
```

## LangChain experimental
### LangChain experimental
The `langchain-experimental` package holds experimental LangChain code, intended for research and experimental uses.
Install with:

@@ -55,14 +71,15 @@ Install with:
pip install langchain-experimental
```

## LangGraph
`langgraph` is a library for building stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain.
### LangGraph
`langgraph` is a library for building stateful, multi-actor applications with LLMs. It integrates smoothly with LangChain, but can be used without it.
Install with:

```bash
pip install langgraph
```
## LangServe

### LangServe
LangServe helps developers deploy LangChain runnables and chains as a REST API.
LangServe is automatically installed by LangChain CLI.
If not using LangChain CLI, install with:
@@ -80,9 +97,10 @@ Install with:
pip install langchain-cli
```

## LangSmith SDK
The LangSmith SDK is automatically installed by LangChain.
If not using LangChain, install with:
### LangSmith SDK
The LangSmith SDK is automatically installed by LangChain. However, it does not depend on
`langchain-core`, and can be installed and used independently if desired.
If you are not using LangChain, you can install it with:

```bash
pip install langsmith

202
docs/docs/how_to/merge_message_runs.ipynb
Normal file
@@ -0,0 +1,202 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ac47bfab-0f4f-42ce-8bb6-898ef22a0338",
"metadata": {},
"source": [
"# How to merge consecutive messages of the same type\n",
"\n",
"Certain models do not support passing in consecutive messages of the same type (a.k.a. \"runs\" of the same message type).\n",
"\n",
"The `merge_message_runs` utility makes it easy to merge consecutive messages of the same type.\n",
"\n",
"## Basic usage"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "1a215bbb-c05c-40b0-a6fd-d94884d517df",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\")\n",
"\n",
"HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'])\n",
"\n",
"AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')\n"
]
}
],
"source": [
"from langchain_core.messages import (\n",
" AIMessage,\n",
" HumanMessage,\n",
" SystemMessage,\n",
" merge_message_runs,\n",
")\n",
"\n",
"messages = [\n",
" SystemMessage(\"you're a good assistant.\"),\n",
" SystemMessage(\"you always respond with a joke.\"),\n",
" HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}]),\n",
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
" AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" ),\n",
" AIMessage(\"Why, he's probably chasing after the last cup of coffee in the office!\"),\n",
"]\n",
"\n",
"merged = merge_message_runs(messages)\n",
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
]
},
{
"cell_type": "markdown",
"id": "0544c811-7112-4b76-8877-cc897407c738",
"metadata": {},
"source": [
"Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character."
]
},
{
"cell_type": "markdown",
"id": "11f7e8d3",
"metadata": {},
"source": [
"The `merge_message_runs` utility also works with messages composed together using the overloaded `+` operation:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b51855c5",
"metadata": {},
"outputs": [],
"source": [
"messages = (\n",
" SystemMessage(\"you're a good assistant.\")\n",
" + SystemMessage(\"you always respond with a joke.\")\n",
" + HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}])\n",
" + HumanMessage(\"and who is harrison chasing anyways\")\n",
" + AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" )\n",
" + AIMessage(\n",
" \"Why, he's probably chasing after the last cup of coffee in the office!\"\n",
" )\n",
")\n",
"\n",
"merged = merge_message_runs(messages)\n",
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
]
},
{
"cell_type": "markdown",
"id": "1b2eee74-71c8-4168-b968-bca580c25d18",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"`merge_message_runs` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6d5a0283-11f8-435b-b27b-7b18f7693592",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=[], response_metadata={'id': 'msg_01D6R8Naum57q8qBau9vLBUX', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-ac0c465b-b54f-4b8b-9295-e5951250d653-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# pip install -U langchain-anthropic\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
"# Notice we don't pass in messages. This creates\n",
"# a RunnableLambda that takes messages as input\n",
"merger = merge_message_runs()\n",
"chain = merger | llm\n",
"chain.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "72e90dce-693c-4842-9526-ce6460fe956b",
"metadata": {},
"source": [
"Looking at the LangSmith trace we can see that before the messages are passed to the model they are merged: https://smith.langchain.com/public/ab558677-cac9-4c59-9066-1ecce5bcd87c/r\n",
"\n",
"Looking at just the merger, we can see that it's a Runnable object that can be invoked like all Runnables:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "460817a6-c327-429d-958e-181a8c46059c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\"),\n",
" HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways']),\n",
" AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"merger.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "4548d916-ce21-4dc6-8f19-eedb8003ace6",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For a complete description of all arguments head to the API reference: https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.merge_message_runs.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-2",
"language": "python",
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -129,7 +129,7 @@
"id": "a531da5e",
"metadata": {},
"source": [
"## What is the runnable you are trying wrap?\n",
"## What is the runnable you are trying to wrap?\n",
"\n",
"`RunnableWithMessageHistory` can only wrap certain types of Runnables. Specifically, it can be used for any Runnable that takes as input one of:\n",
"\n",
@@ -898,7 +898,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.9.1"
}
},
"nbformat": 4,

@@ -1,5 +1,19 @@
{
"cells": [
{
"cell_type": "raw",
"id": "adc7ee09",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"keywords: [create_react_agent, create_react_agent()]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "457cdc67-1893-4653-8b0c-b185a5947e74",
@@ -7,9 +21,18 @@
"source": [
"# How to migrate from legacy LangChain agents to LangGraph\n",
"\n",
"Here we focus on how to move from legacy LangChain agents to LangGraph agents.\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Agents](/docs/concepts/#agents)\n",
"- [LangGraph](https://langchain-ai.github.io/langgraph/)\n",
"- [Tool calling](/docs/how_to/tool_calling/)\n",
"\n",
":::\n",
"\n",
"Here we focus on how to move from legacy LangChain agents to more flexible [LangGraph](https://langchain-ai.github.io/langgraph/) agents.\n",
"LangChain agents (the [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters.\n",
"In this notebook we will show how those parameters map to the LangGraph [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent).\n",
"In this notebook we will show how those parameters map to the LangGraph react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) prebuilt helper method.\n",
"\n",
"#### Prerequisites\n",
"\n",
@@ -195,7 +218,7 @@
"\n",
"Let's take a look at all of these below. We will pass in custom instructions to get the agent to respond in Spanish.\n",
"\n",
"First up, using AgentExecutor:"
"First up, using `AgentExecutor`:"
]
},
{
@@ -238,7 +261,16 @@
"id": "bd5f5500-5ae4-4000-a9fd-8c5a2cc6404d",
"metadata": {},
"source": [
"Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent). This can either be a string or a LangChain SystemMessage."
"Now, let's pass a custom system message to [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent).\n",
"\n",
"LangGraph's prebuilt `create_react_agent` does not take a prompt template directly as a parameter, but instead takes a [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) parameter. This modifies messages before they are passed into the model, and can be one of four values:\n",
"\n",
"- A `SystemMessage`, which is added to the beginning of the list of messages.\n",
"- A `string`, which is converted to a `SystemMessage` and added to the beginning of the list of messages.\n",
"- A `Callable`, which should take in a list of messages. The output is then passed to the language model.\n",
"- Or a [`Runnable`](/docs/concepts/#langchain-expression-language-lcel), which should take in a list of messages. The output is then passed to the language model.\n",
"\n",
"Here's how it looks in action:"
]
},
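A minimal sketch of the simplest `messages_modifier` case, assuming the `model` and `tools` defined earlier in the notebook:

```python
from langgraph.prebuilt import create_react_agent

# A plain string is converted to a SystemMessage and prepended to the messages.
app = create_react_agent(model, tools, messages_modifier="Respond only in Spanish.")
```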
{
@@ -319,7 +351,15 @@
"id": "68df3a09",
"metadata": {},
"source": [
"## Memory\n",
"## Memory"
]
},
{
"cell_type": "markdown",
"id": "96e7ffc8",
"metadata": {},
"source": [
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation."
]
@@ -407,7 +447,7 @@
"id": "c2a5a32f",
"metadata": {},
"source": [
"#### In LangGraph\n",
"### In LangGraph\n",
"\n",
"Memory is just [persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/), aka [checkpointing](https://langchain-ai.github.io/langgraph/reference/checkpoints/).\n",
"\n",
@@ -478,6 +518,8 @@
"source": [
"## Iterating through steps\n",
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. LangGraph supports stepwise iteration using [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) "
]
},
@@ -536,7 +578,7 @@
"id": "46ccbcbf",
"metadata": {},
"source": [
"#### In LangGraph\n",
"### In LangGraph\n",
"\n",
"In LangGraph, things are handled natively using [stream](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.graph.CompiledGraph.stream) or the asynchronous `astream` method."
]
@@ -587,6 +629,8 @@
"source": [
"## `return_intermediate_steps`\n",
"\n",
"### In LangChain\n",
"\n",
"Setting this parameter on AgentExecutor allows users to access intermediate_steps, which pairs agent actions (e.g., tool invocations) with their outcomes.\n"
]
},
@@ -615,6 +659,8 @@
"id": "594f7567-302f-4fa8-85bb-025ac8322162",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"By default the [react agent executor](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) in LangGraph appends all messages to the central state. Therefore, it is easy to see any intermediate steps by just looking at the full state."
]
},
@@ -655,11 +701,9 @@
"source": [
"## `max_iterations`\n",
"\n",
"`AgentExecutor` implements a `max_iterations` parameter, whereas this is controlled via `recursion_limit` in LangGraph.\n",
"### In LangChain\n",
"\n",
"Note that in AgentExecutor, an \"iteration\" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results.\n",
"\n",
"If the recursion limit is reached, LangGraph raises a specific exception type, that we can catch and manage similarly to AgentExecutor."
"`AgentExecutor` implements a `max_iterations` parameter, allowing users to abort a run that exceeds a specified number of iterations."
]
},
{
@@ -737,6 +781,20 @@
"agent_executor.invoke({\"input\": query})"
]
},
{
"cell_type": "markdown",
"id": "dd3a933f",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"In LangGraph this is controlled via the `recursion_limit` configuration parameter.\n",
"\n",
"Note that in `AgentExecutor`, an \"iteration\" includes a full turn of tool invocation and execution. In LangGraph, each step contributes to the recursion limit, so we will need to multiply by two (and add one) to get equivalent results.\n",
"\n",
"If the recursion limit is reached, LangGraph raises a specific exception type that we can catch and manage similarly to AgentExecutor."
]
},
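A sketch of that mapping, assuming the `app` and `query` defined earlier in the notebook:

```python
from langgraph.errors import GraphRecursionError

RECURSION_LIMIT = 2 * 3 + 1  # LangGraph equivalent of max_iterations=3

try:
    app.invoke(
        {"messages": [("human", query)]},
        {"recursion_limit": RECURSION_LIMIT},
    )
except GraphRecursionError:
    print("Agent stopped due to max iterations.")
```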
{
"cell_type": "code",
"execution_count": 16,
@@ -782,6 +840,8 @@
"source": [
"## `max_execution_time`\n",
"\n",
"### In LangChain\n",
"\n",
"`AgentExecutor` implements a `max_execution_time` parameter, allowing users to abort a run that exceeds a total time limit."
]
},
@@ -848,6 +908,8 @@
"id": "d02eb025",
"metadata": {},
"source": [
"### In LangGraph\n",
"\n",
"With LangGraph's react agent, you can control timeouts on two levels. \n",
"\n",
"You can set a `step_timeout` to bound each **step**:"
|
||||
@@ -936,6 +998,8 @@
|
||||
"source": [
|
||||
"## `early_stopping_method`\n",
|
||||
"\n",
|
||||
"### In LangChain\n",
|
||||
"\n",
|
||||
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an [early_stopping_method](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying \"Agent stopped due to iteration limit or time limit.\" (`\"force\"`) or prompt the LLM a final time to respond (`\"generate\"`)."
|
||||
]
|
||||
},
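A minimal sketch of the `"force"` behavior, assuming the `agent`, `tools`, and `query` from earlier cells:

```python
from langchain.agents import AgentExecutor

agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    early_stopping_method="force",
    max_iterations=1,
)
# Returns the fixed "Agent stopped..." string once the iteration limit trips:
agent_executor.invoke({"input": query})
```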
|
||||
@@ -996,7 +1060,7 @@
|
||||
"id": "706e05c4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### In LangGraph\n",
|
||||
"### In LangGraph\n",
|
||||
"\n",
|
||||
"In LangGraph, you can explicitly handle the response behavior outside the agent, since the full state can be accessed."
|
||||
]
|
||||
@@ -1045,6 +1109,8 @@
|
||||
"source": [
|
||||
"## `trim_intermediate_steps`\n",
|
||||
"\n",
|
||||
"### In LangChain\n",
|
||||
"\n",
|
||||
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim_intermediate_steps](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function.\n",
|
||||
"\n",
|
||||
"For instance, we could trim the value so the agent only sees the most recent intermediate step."
|
||||
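For example, a sketch with a custom trimming function, assuming the `agent`, `tools`, and `query` from earlier cells:

```python
from langchain.agents import AgentExecutor


def trim_steps(steps: list) -> list:
    # Keep only the most recent (AgentAction, observation) pair.
    return steps[-1:]


agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    trim_intermediate_steps=trim_steps,
)
agent_executor.invoke({"input": query})
```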
@@ -1148,7 +1214,7 @@
|
||||
"id": "3d450c5a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### In LangGraph\n",
|
||||
"### In LangGraph\n",
|
||||
"\n",
|
||||
"We can use the [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) just as before when passing in [prompt templates](#prompt-templates)."
|
||||
]
|
||||
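A sketch of the LangGraph analogue, assuming the `model` and `tools` from above; the particular trimming heuristic here is illustrative, not prescribed by the guide:

```python
from langgraph.prebuilt import create_react_agent


def _modify_messages(messages: list):
    # Keep the original user question plus only the most recent messages,
    # roughly mirroring trim_intermediate_steps above.
    return [messages[0]] + messages[-3:]


app = create_react_agent(model, tools=tools, messages_modifier=_modify_messages)
```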
@@ -1212,6 +1278,18 @@
|
||||
"except GraphRecursionError as e:\n",
|
||||
" print(\"Stopping agent prematurely due to triggering stop condition\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "41377eb8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now learned how to migrate your LangChain agent executors to LangGraph.\n",
|
||||
"\n",
|
||||
"Next, check out other [LangGraph how-to guides](https://langchain-ai.github.io/langgraph/how-tos/)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
798
docs/docs/how_to/migrate_chains.ipynb
Normal file
@@ -0,0 +1,798 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f331037f-be3f-4782-856f-d55dab952488",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to migrate chains to LCEL\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [LangChain Expression Language](/docs/concepts#langchain-expression-language-lcel)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"LCEL is designed to streamline the process of building useful apps with LLMs and combining related components. It does this by providing:\n",
|
||||
"\n",
|
||||
"1. **A unified interface**: Every LCEL object implements the `Runnable` interface, which defines a common set of invocation methods (`invoke`, `batch`, `stream`, `ainvoke`, ...). This makes it possible to also automatically and consistently support useful operations like streaming of intermediate steps and batching, since every chain composed of LCEL objects is itself an LCEL object.\n",
|
||||
"2. **Composition primitives**: LCEL provides a number of primitives that make it easy to compose chains, parallelize components, add fallbacks, dynamically configure chain internals, and more.\n",
|
||||
"\n",
|
||||
"LangChain maintains a number of legacy abstractions. Many of these can be reimplemented via short combinations of LCEL primitives. Doing so confers some general advantages:\n",
|
||||
"\n",
|
||||
"- The resulting chains typically implement the full `Runnable` interface, including streaming and asynchronous support where appropriate;\n",
|
||||
"- The chains may be more easily extended or modified;\n",
|
||||
"- The parameters of the chain are typically surfaced for easier customization (e.g., prompts) over previous versions, which tended to be subclasses and had opaque parameters and internals.\n",
|
||||
"\n",
|
||||
"The LCEL implementations can be slightly more verbose, but there are significant benefits in transparency and customizability.\n",
|
||||
"\n",
|
||||
"In this guide we review LCEL implementations of common legacy abstractions. Where appropriate, we link out to separate guides with more detail."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b99b47ec",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-community langchain langchain-openai faiss-cpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "717c8673",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e3621b62-a037-42b8-8faa-59575608bb8b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `LLMChain`\n",
|
||||
"<span data-heading-keywords=\"llmchain\"></span>\n",
|
||||
"\n",
|
||||
"[`LLMChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) combined a prompt template, LLM, and output parser into a class.\n",
|
||||
"\n",
|
||||
"Some advantages of switching to the LCEL implementation are:\n",
|
||||
"\n",
|
||||
"- Clarity around contents and parameters. The legacy `LLMChain` contains a default output parser and other options.\n",
|
||||
"- Easier streaming. `LLMChain` only supports streaming via callbacks.\n",
|
||||
"- Easier access to raw message outputs if desired. `LLMChain` only exposes these via a parameter or via callback.\n",
|
||||
"\n",
|
||||
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
|
||||
"\n",
|
||||
"<ColumnContainer>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### Legacy\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "e628905c-430e-4e4a-9d7c-c91d2f42052e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'adjective': 'funny',\n",
|
||||
" 'text': \"Why couldn't the bicycle find its way home?\\n\\nBecause it lost its bearings!\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"user\", \"Tell me a {adjective} joke\")],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = LLMChain(llm=ChatOpenAI(), prompt=prompt)\n",
|
||||
"\n",
|
||||
"chain({\"adjective\": \"funny\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "0d2a7cf8-1bc7-405c-bb0d-f2ab2ba3b6ab",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two tired!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"user\", \"Tell me a {adjective} joke\")],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"chain.invoke({\"adjective\": \"funny\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3c0b0513-77b8-4371-a20e-3e487cec7e7f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"</Column>\n",
|
||||
"</ColumnContainer>\n",
|
||||
"\n",
|
||||
"Note that `LLMChain` by default returns a `dict` containing both the input and the output. If this behavior is desired, we can replicate it using another LCEL primitive, [`RunnablePassthrough`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "529206c5-abbe-4213-9e6c-3b8586c8000d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'adjective': 'funny',\n",
|
||||
" 'text': \"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two tired!\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"outer_chain = RunnablePassthrough().assign(text=chain)\n",
|
||||
"\n",
|
||||
"outer_chain.invoke({\"adjective\": \"funny\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "29d2e26c-2854-4971-9c2b-613450993921",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [this tutorial](/docs/tutorials/llm_chain) for more detail on building with prompt templates, LLMs, and output parsers."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "00df631d-5121-4918-94aa-b88acce9b769",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `ConversationChain`\n",
|
||||
"<span data-heading-keywords=\"conversationchain\"></span>\n",
|
||||
"\n",
|
||||
"[`ConversationChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.conversation.base.ConversationChain.html) incorporates a memory of previous messages to sustain a stateful conversation.\n",
|
||||
"\n",
|
||||
"Some advantages of switching to the LCEL implementation are:\n",
|
||||
"\n",
|
||||
"- Innate support for threads/separate sessions. To make this work with `ConversationChain`, you'd need to instantiate a separate memory class outside the chain.\n",
|
||||
"- More explicit parameters. `ConversationChain` contains a hidden default prompt, which can cause confusion.\n",
|
||||
"- Streaming support. `ConversationChain` only supports streaming via callbacks.\n",
|
||||
"\n",
|
||||
"`RunnableWithMessageHistory` implements sessions via configuration parameters. It should be instantiated with a callable that returns a [chat message history](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html). By default, it expects this function to take a single argument `session_id`.\n",
|
||||
"\n",
|
||||
"<ColumnContainer>\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### Legacy\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "4f2cc6dc-d70a-4c13-9258-452f14290da6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'how are you?',\n",
|
||||
" 'history': '',\n",
|
||||
" 'response': \"Arrr, I be doin' well, me matey! Just sailin' the high seas in search of treasure and adventure. How can I assist ye today?\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"template = \"\"\"\n",
|
||||
"You are a pirate. Answer the following questions as best you can.\n",
|
||||
"Chat history: {history}\n",
|
||||
"Question: {input}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"memory = ConversationBufferMemory()\n",
|
||||
"\n",
|
||||
"chain = ConversationChain(\n",
|
||||
" llm=ChatOpenAI(),\n",
|
||||
" memory=memory,\n",
|
||||
" prompt=prompt,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain({\"input\": \"how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "173e1a9c-2a18-4669-b0de-136f39197786",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Arr, matey! I be sailin' the high seas with me crew, searchin' for buried treasure and adventure! How be ye doin' on this fine day?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a pirate. Answer the following questions as best you can.\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"history = InMemoryChatMessageHistory()\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(chain, lambda x: history)\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke(\n",
|
||||
" {\"input\": \"how are you?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"42\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b386ce6-895e-442c-88f3-7bec0ab9f401",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"</Column>\n",
|
||||
"</ColumnContainer>\n",
|
||||
"\n",
|
||||
"The above example uses the same `history` for all sessions. The example below shows how to use a different chat history for each session."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "4e05994f-1fbc-4699-bf2e-62cb0e4deeb8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Ahoy there! What be ye wantin' from this old pirate?\", response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 29, 'total_tokens': 44}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1846d5f5-0dda-43b6-bb49-864e541f9c29-0', usage_metadata={'input_tokens': 29, 'output_tokens': 15, 'total_tokens': 44})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.chat_history import BaseChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"store = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = InMemoryChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(chain, get_session_history)\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke(\"Hello!\", config={\"configurable\": {\"session_id\": \"abc123\"}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c36ebecb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [this tutorial](/docs/tutorials/chatbot) for a more end-to-end guide on building with [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html).\n",
|
||||
"\n",
|
||||
"## `RetrievalQA`\n",
|
||||
"<span data-heading-keywords=\"retrievalqa\"></span>\n",
|
||||
"\n",
|
||||
"The [`RetrievalQA`](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval_qa.base.RetrievalQA.html) chain performed natural-language question answering over a data source using retrieval-augmented generation.\n",
|
||||
"\n",
|
||||
"Some advantages of switching to the LCEL implementation are:\n",
|
||||
"\n",
|
||||
"- Easier customizability. Details such as the prompt and how documents are formatted are only configurable via specific parameters in the `RetrievalQA` chain.\n",
|
||||
"- More easily return source documents.\n",
|
||||
"- Support for runnable methods like streaming and async operations.\n",
|
||||
"\n",
|
||||
"Now let's look at them side-by-side. We'll use the same ingestion code to load a [blog post by Lilian Weng](https://lilianweng.github.io/posts/2023-06-23-agent/) on autonomous agents into a local vector store:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1efbe16e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load docs\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"# Split\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Store splits\n",
|
||||
"vectorstore = FAISS.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())\n",
|
||||
"\n",
|
||||
"# LLM\n",
|
||||
"llm = ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c7e16438",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<ColumnContainer>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### Legacy"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "43bf55a0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'What are autonomous agents?',\n",
|
||||
" 'result': 'Autonomous agents are LLM-empowered agents that handle autonomous design, planning, and performance of complex tasks, such as scientific experiments. These agents can browse the Internet, read documentation, execute code, call robotics experimentation APIs, and leverage other LLMs. They are capable of reasoning and planning ahead for complicated tasks by breaking them down into smaller steps.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"\n",
|
||||
"# See full prompt at https://smith.langchain.com/hub/rlm/rag-prompt\n",
|
||||
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
|
||||
"\n",
|
||||
"qa_chain = RetrievalQA.from_llm(\n",
|
||||
" llm, retriever=vectorstore.as_retriever(), prompt=prompt\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"qa_chain(\"What are autonomous agents?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "081948e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "9efcc931",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Autonomous agents are agents that can handle autonomous design, planning, and performance of complex tasks, such as scientific experiments. They can browse the Internet, read documentation, execute code, call robotics experimentation APIs, and leverage other language model models. These agents use reasoning steps to develop solutions to specific tasks, like creating a novel anticancer drug.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"# See full prompt at https://smith.langchain.com/hub/rlm/rag-prompt\n",
|
||||
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"qa_chain = (\n",
|
||||
" {\n",
|
||||
" \"context\": vectorstore.as_retriever() | format_docs,\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"qa_chain.invoke(\"What are autonomous agents?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d6f44fe8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</Column>\n",
|
||||
"</ColumnContainer>\n",
|
||||
"\n",
|
||||
"The LCEL implementation exposes the internals of what's happening around retrieving, formatting documents, and passing them through a prompt to the LLM, but it is more verbose. You can customize and wrap this composition logic in a helper function, or use the higher-level [`create_retrieval_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html) and [`create_stuff_documents_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) helper method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "5fe42761",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What are autonomous agents?',\n",
|
||||
" 'context': [Document(page_content='Boiko et al. (2023) also looked into LLM-empowered agents for scientific discovery, to handle autonomous design, planning, and performance of complex scientific experiments. This agent can use tools to browse the Internet, read documentation, execute code, call robotics experimentation APIs and leverage other LLMs.\\nFor example, when requested to \"develop a novel anticancer drug\", the model came up with the following reasoning steps:', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'}),\n",
|
||||
" Document(page_content='Weng, Lilian. (Jun 2023). “LLM-powered Autonomous Agents”. Lil’Log. https://lilianweng.github.io/posts/2023-06-23-agent/.', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'}),\n",
|
||||
" Document(page_content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'}),\n",
|
||||
" Document(page_content=\"LLM Powered Autonomous Agents | Lil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLil'Log\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nPosts\\n\\n\\n\\n\\nArchive\\n\\n\\n\\n\\nSearch\\n\\n\\n\\n\\nTags\\n\\n\\n\\n\\nFAQ\\n\\n\\n\\n\\nemojisearch.app\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n LLM Powered Autonomous Agents\\n \\nDate: June 23, 2023 | Estimated Reading Time: 31 min | Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\", metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'})],\n",
|
||||
" 'answer': 'Autonomous agents are entities that can operate independently, making decisions and taking actions without direct human intervention. These agents can perform tasks such as planning, executing complex experiments, and leveraging various tools and resources to achieve objectives. In the context provided, LLM-powered autonomous agents are specifically designed for scientific discovery, capable of handling tasks like designing novel anticancer drugs through reasoning steps.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"from langchain.chains import create_retrieval_chain\n",
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"\n",
|
||||
"# See full prompt at https://smith.langchain.com/hub/langchain-ai/retrieval-qa-chat\n",
|
||||
"retrieval_qa_chat_prompt = hub.pull(\"langchain-ai/retrieval-qa-chat\")\n",
|
||||
"\n",
|
||||
"combine_docs_chain = create_stuff_documents_chain(llm, retrieval_qa_chat_prompt)\n",
|
||||
"rag_chain = create_retrieval_chain(vectorstore.as_retriever(), combine_docs_chain)\n",
|
||||
"\n",
|
||||
"rag_chain.invoke({\"input\": \"What are autonomous agents?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2772f4e9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## `ConversationalRetrievalChain`\n",
|
||||
"<span data-heading-keywords=\"conversationalretrievalchain\"></span>\n",
|
||||
"\n",
|
||||
"The [`ConversationalRetrievalChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.conversational_retrieval.base.ConversationalRetrievalChain.html) was an all-in one way that combined retrieval-augmented generation with chat history, allowing you to \"chat with\" your documents.\n",
|
||||
"\n",
|
||||
"Advantages of switching to the LCEL implementation are similar to the `RetrievalQA` section above:\n",
|
||||
"\n",
|
||||
"- Clearer internals. The `ConversationalRetrievalChain` chain hides an entire question rephrasing step which dereferences the initial query against the chat history.\n",
|
||||
" - This means the class contains two sets of configurable prompts, LLMs, etc.\n",
|
||||
"- More easily return source documents.\n",
|
||||
"- Support for runnable methods like streaming and async operations.\n",
|
||||
"\n",
|
||||
"Here are side-by-side implementations with custom prompts. We'll reuse the loaded documents and vector store from the previous section:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8bc06416",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<ColumnContainer>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### Legacy"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "54eb9576",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What are autonomous agents?',\n",
|
||||
" 'chat_history': '',\n",
|
||||
" 'answer': 'Autonomous agents are powered by Large Language Models (LLMs) to handle tasks like scientific discovery and complex experiments autonomously. These agents can browse the internet, read documentation, execute code, and leverage other LLMs to perform tasks. They can reason and plan ahead to decompose complicated tasks into manageable steps.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 31,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"\n",
|
||||
"condense_question_template = \"\"\"\n",
|
||||
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n",
|
||||
"\n",
|
||||
"Chat History:\n",
|
||||
"{chat_history}\n",
|
||||
"Follow Up Input: {question}\n",
|
||||
"Standalone question:\"\"\"\n",
|
||||
"\n",
|
||||
"condense_question_prompt = ChatPromptTemplate.from_template(condense_question_template)\n",
|
||||
"\n",
|
||||
"qa_template = \"\"\"\n",
|
||||
"You are an assistant for question-answering tasks.\n",
|
||||
"Use the following pieces of retrieved context to answer\n",
|
||||
"the question. If you don't know the answer, say that you\n",
|
||||
"don't know. Use three sentences maximum and keep the\n",
|
||||
"answer concise.\n",
|
||||
"\n",
|
||||
"Chat History:\n",
|
||||
"{chat_history}\n",
|
||||
"\n",
|
||||
"Other context:\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"qa_prompt = ChatPromptTemplate.from_template(qa_template)\n",
|
||||
"\n",
|
||||
"convo_qa_chain = ConversationalRetrievalChain.from_llm(\n",
|
||||
" llm,\n",
|
||||
" vectorstore.as_retriever(),\n",
|
||||
" condense_question_prompt=condense_question_prompt,\n",
|
||||
" combine_docs_chain_kwargs={\n",
|
||||
" \"prompt\": qa_prompt,\n",
|
||||
" },\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"convo_qa_chain(\n",
|
||||
" {\n",
|
||||
" \"question\": \"What are autonomous agents?\",\n",
|
||||
" \"chat_history\": \"\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "43a8a23c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "c884b138",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What are autonomous agents?',\n",
|
||||
" 'chat_history': [],\n",
|
||||
" 'context': [Document(page_content='Boiko et al. (2023) also looked into LLM-empowered agents for scientific discovery, to handle autonomous design, planning, and performance of complex scientific experiments. This agent can use tools to browse the Internet, read documentation, execute code, call robotics experimentation APIs and leverage other LLMs.\\nFor example, when requested to \"develop a novel anticancer drug\", the model came up with the following reasoning steps:', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'}),\n",
|
||||
" Document(page_content='Weng, Lilian. (Jun 2023). “LLM-powered Autonomous Agents”. Lil’Log. https://lilianweng.github.io/posts/2023-06-23-agent/.', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'}),\n",
|
||||
" Document(page_content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'}),\n",
|
||||
" Document(page_content='Or\\n@article{weng2023agent,\\n title = \"LLM-powered Autonomous Agents\",\\n author = \"Weng, Lilian\",\\n journal = \"lilianweng.github.io\",\\n year = \"2023\",\\n month = \"Jun\",\\n url = \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\\n}\\nReferences#\\n[1] Wei et al. “Chain of thought prompting elicits reasoning in large language models.” NeurIPS 2022\\n[2] Yao et al. “Tree of Thoughts: Dliberate Problem Solving with Large Language Models.” arXiv preprint arXiv:2305.10601 (2023).', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/', 'title': \"LLM Powered Autonomous Agents | Lil'Log\", 'description': 'Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.\\nAgent System Overview In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:', 'language': 'en'})],\n",
|
||||
" 'answer': 'Autonomous agents are entities capable of acting independently, making decisions, and performing tasks without direct human intervention. These agents can interact with their environment, perceive information, and take actions based on their goals or objectives. They often use artificial intelligence techniques to navigate and accomplish tasks in complex or dynamic environments.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import create_history_aware_retriever, create_retrieval_chain\n",
|
||||
"\n",
|
||||
"condense_question_system_template = (\n",
|
||||
" \"Given a chat history and the latest user question \"\n",
|
||||
" \"which might reference context in the chat history, \"\n",
|
||||
" \"formulate a standalone question which can be understood \"\n",
|
||||
" \"without the chat history. Do NOT answer the question, \"\n",
|
||||
" \"just reformulate it if needed and otherwise return it as is.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"condense_question_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", condense_question_system_template),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"history_aware_retriever = create_history_aware_retriever(\n",
|
||||
" llm, vectorstore.as_retriever(), condense_question_prompt\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"system_prompt = (\n",
|
||||
" \"You are an assistant for question-answering tasks. \"\n",
|
||||
" \"Use the following pieces of retrieved context to answer \"\n",
|
||||
" \"the question. If you don't know the answer, say that you \"\n",
|
||||
" \"don't know. Use three sentences maximum and keep the \"\n",
|
||||
" \"answer concise.\"\n",
|
||||
" \"\\n\\n\"\n",
|
||||
" \"{context}\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"qa_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", system_prompt),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"qa_chain = create_stuff_documents_chain(llm, qa_prompt)\n",
|
||||
"\n",
|
||||
"convo_qa_chain = create_retrieval_chain(history_aware_retriever, qa_chain)\n",
|
||||
"\n",
|
||||
"convo_qa_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input\": \"What are autonomous agents?\",\n",
|
||||
" \"chat_history\": [],\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b2717810",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"</ColumnContainer>\n",
|
||||
"\n",
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now seen how to migrate existing usage of some legacy chains to LCEL.\n",
|
||||
"\n",
|
||||
"Next, check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -52,7 +52,12 @@
|
||||
" (\"system\", \"Describe the image provided\"),\n",
|
||||
" (\n",
|
||||
" \"user\",\n",
|
||||
" [{\"type\": \"image_url\", \"image_url\": \"data:image/jpeg;base64,{image_data}\"}],\n",
|
||||
" [\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data}\"},\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
@@ -110,11 +115,11 @@
|
||||
" [\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": \"data:image/jpeg;base64,{image_data1}\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data1}\"},\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": \"data:image/jpeg;base64,{image_data2}\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data2}\"},\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
|
||||
@@ -22,6 +22,7 @@ the case of inheritance and in the case of passing objects to LangChain.
|
||||
|
||||
```python
|
||||
from pydantic.v1 import root_validator, validator
|
||||
from langchain_core.tools import BaseTool
|
||||
|
||||
class CustomTool(BaseTool): # BaseTool is v1 code
|
||||
x: int = Field(default=1)
|
||||
@@ -48,6 +49,7 @@ Mixing Pydantic v2 primitives with Pydantic v1 primitives can raise cryptic erro
|
||||
|
||||
```python
|
||||
from pydantic import Field, field_validator # pydantic v2
|
||||
from langchain_core.tools import BaseTool
|
||||
|
||||
class CustomTool(BaseTool): # BaseTool is v1 code
|
||||
x: int = Field(default=1)
|
||||
@@ -102,4 +104,4 @@ Tool.from_function( # <-- tool uses v1 namespace
|
||||
description="useful for when you need to answer questions about math",
|
||||
args_schema=CalculatorInput
|
||||
)
|
||||
```
|
||||
```
|
||||
|
||||
@@ -323,7 +323,7 @@
|
||||
"id": "fa0f589d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Routing by semantic similarity\n",
|
||||
"## Routing by semantic similarity\n",
|
||||
"\n",
|
||||
"One especially useful technique is to use embeddings to route a query to the most relevant prompt. Here's an example."
|
||||
]
|
||||
@@ -371,7 +371,7 @@
|
||||
"chain = (\n",
|
||||
" {\"query\": RunnablePassthrough()}\n",
|
||||
" | RunnableLambda(prompt_router)\n",
|
||||
" | ChatAnthropic(model_name=\"claude-3-haiku-20240307\")\n",
|
||||
" | ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
|
||||
78
docs/docs/how_to/runnable_runtime_secrets.ipynb
Normal file
@@ -0,0 +1,78 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6fcd2994-0092-4fa3-9bb1-c9c84babadc5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass runtime secrets to runnables\n",
|
||||
"\n",
|
||||
":::info Requires `langchain-core >= 0.2.22`\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"We can pass in secrets to our runnables at runtime using the `RunnableConfig`. Specifically we can pass in secrets with a `__` prefix to the `configurable` field. This will ensure that these secrets aren't traced as part of the invocation:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "92e42e91-c277-49de-aa7a-dfb5c993c817",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"7"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def foo(x: int, config: RunnableConfig) -> int:\n",
|
||||
" \"\"\"Sum x and a secret int\"\"\"\n",
|
||||
" return x + config[\"configurable\"][\"__top_secret_int\"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"foo.invoke({\"x\": 5}, {\"configurable\": {\"__top_secret_int\": 2, \"traced_key\": \"bar\"}})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae3a4fb9-2ce7-46b2-b654-35dff0ae7197",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace for this run, we can see that \"traced_key\" was recorded (as part of Metadata) while our secret int was not: https://smith.langchain.com/public/aa7e3289-49ca-422d-a408-f6b927210170/r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -297,13 +297,67 @@
|
||||
"print(len(docs))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"### Gradient\n",
|
||||
"\n",
|
||||
"In this method, the gradient of distance is used to split chunks along with the percentile method.\n",
|
||||
"This method is useful when chunks are highly correlated with each other or specific to a domain e.g. legal or medical. The idea is to apply anomaly detection on gradient array so that the distribution become wider and easy to identify boundaries in highly semantic data."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "423c6e099e94ca69"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1f65472",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"text_splitter = SemanticChunker(\n",
|
||||
" OpenAIEmbeddings(), breakpoint_threshold_type=\"gradient\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = text_splitter.create_documents([state_of_the_union])\n",
|
||||
"print(docs[0].page_content)"
|
||||
],
|
||||
"metadata": {},
|
||||
"id": "e9f393d316ce1f6c"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"26\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(len(docs))"
|
||||
],
|
||||
"metadata": {},
|
||||
"id": "a407cd57f02a0db4"
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
305
docs/docs/how_to/serialization.ipynb
Normal file
@@ -0,0 +1,305 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ab3dc782-321e-4503-96ee-ac88a15e4b5e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to save and load LangChain objects\n",
|
||||
"\n",
|
||||
"LangChain classes implement standard methods for serialization. Serializing LangChain objects using these methods confer some advantages:\n",
|
||||
"\n",
|
||||
"- Secrets, such as API keys, are separated from other parameters and can be loaded back to the object on de-serialization;\n",
|
||||
"- De-serialization is kept compatible across package versions, so objects that were serialized with one version of LangChain can be properly de-serialized with another.\n",
|
||||
"\n",
|
||||
"To save and load LangChain objects using this system, use the `dumpd`, `dumps`, `load`, and `loads` functions in the [load module](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.load) of `langchain-core`. These functions support JSON and JSON-serializable objects.\n",
|
||||
"\n",
|
||||
"All LangChain objects that inherit from [Serializable](https://api.python.langchain.com/en/latest/load/langchain_core.load.serializable.Serializable.html) are JSON-serializable. Examples include [messages](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.messages), [document objects](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) (e.g., as returned from [retrievers](/docs/concepts/#retrievers)), and most [Runnables](/docs/concepts/#langchain-expression-language-lcel), such as chat models, retrievers, and [chains](/docs/how_to/sequence) implemented with the LangChain Expression Language.\n",
|
||||
"\n",
|
||||
"Below we walk through an example with a simple [LLM chain](/docs/tutorials/llm_chain).\n",
|
||||
"\n",
|
||||
":::{.callout-caution}\n",
|
||||
"\n",
|
||||
"De-serialization using `load` and `loads` can instantiate any serializable LangChain object. Only use this feature with trusted inputs!\n",
|
||||
"\n",
|
||||
"De-serialization is a beta feature and is subject to change.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "f85d9e51-2a36-4f69-83b1-c716cd43f790",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.load import dumpd, dumps, load, loads\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"Translate the following into {language}:\"),\n",
|
||||
" (\"user\", \"{text}\"),\n",
|
||||
" ],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", api_key=\"llm-api-key\")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "356ea99f-5cb5-4433-9a6c-2443d2be9ed3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Saving objects\n",
|
||||
"\n",
|
||||
"### To json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "26516764-d46b-4357-a6c6-bd8315bfa530",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"schema\",\n",
|
||||
" \"runnable\",\n",
|
||||
" \"RunnableSequence\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"first\": {\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"prompts\",\n",
|
||||
" \"chat\",\n",
|
||||
" \"ChatPromptTemplate\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"input_variables\": [\n",
|
||||
" \"language\",\n",
|
||||
" \"text\"\n",
|
||||
" ],\n",
|
||||
" \"messages\": [\n",
|
||||
" {\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"string_representation = dumps(chain, pretty=True)\n",
|
||||
"print(string_representation[:500])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bd425716-545d-466b-a4e5-dc9952cfd72a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### To a json-serializable Python dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6561a968-1741-4419-8c29-e705b9d0ef39",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'dict'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dict_representation = dumpd(chain)\n",
|
||||
"\n",
|
||||
"print(type(dict_representation))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "711e986e-dd24-4839-9e38-c57903378a5f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### To disk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f818378b-f4d6-43a7-895b-76cf7359b157",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"with open(\"/tmp/chain.json\", \"w\") as fp:\n",
|
||||
" json.dump(string_representation, fp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e621a32-ff5f-4627-ad59-88cacba73c6b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the API key is withheld from the serialized representations. Parameters that are considered secret are specified by the `.lc_secrets` attribute of the LangChain object:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8225e150-000a-4fbc-9f3d-09568f4b560b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'openai_api_key': 'OPENAI_API_KEY'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.last.lc_secrets"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d090177-eb1c-4bfb-8c13-29286afe17d9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading objects\n",
|
||||
"\n",
|
||||
"Specifying `secrets_map` in `load` and `loads` will load the corresponding secrets onto the de-serialized LangChain object.\n",
|
||||
"\n",
|
||||
"### From string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "54a66267-5f3a-40a2-bfcc-8b44bb24c154",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = loads(string_representation, secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ed9aff1-92cc-44ba-b2ec-4d12f924fa03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### From dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "76979932-13de-4427-9f88-040fb05a6778",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = load(dict_representation, secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7dd81a2a-5163-414d-ab42-f1c35e30471b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### From disk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "033f62a7-3377-472a-be58-718baa6ab445",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with open(\"/tmp/chain.json\", \"r\") as fp:\n",
|
||||
" chain = loads(json.load(fp), secrets_map={\"OPENAI_API_KEY\": \"llm-api-key\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dc520fdb-035a-468f-a8a8-c3ffe8ed98eb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we recover the API key specified at the start of the guide:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "566b2475-d9b4-432b-8c3b-27c2f183624e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'llm-api-key'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.last.openai_api_key.get_secret_value()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7b4cba53-e1d5-4979-927e-b5794a02afc3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
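Putting the pieces above together, here is a minimal end-to-end sketch of the save/load round trip. It assumes the `chain` object constructed earlier in this guide; the file path and placeholder key value are illustrative only.

```python
import json

from langchain_core.load import dumps, loads

# Serialize: secret parameters such as the API key are withheld automatically.
string_representation = dumps(chain, pretty=True)
with open("/tmp/chain.json", "w") as fp:
    json.dump(string_representation, fp)

# De-serialize: re-supply the secret via secrets_map.
with open("/tmp/chain.json", "r") as fp:
    chain = loads(json.load(fp), secrets_map={"OPENAI_API_KEY": "llm-api-key"})
```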
|
||||
@@ -351,7 +351,7 @@
|
||||
"id": "ab1b2e7c-6ea8-4674-98eb-a43c69f5c19d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To help enforce proper use of our Python tool, we'll using [tool calling](/docs/how_to/tool_calling/):"
|
||||
"To help enforce proper use of our Python tool, we'll using [tool calling](/docs/how_to/tool_calling):"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -243,7 +243,7 @@
|
||||
"text": [
|
||||
"================================\u001b[1m System Message \u001b[0m================================\n",
|
||||
"\n",
|
||||
"You are a \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m expert. Given an input question, creat a syntactically correct \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m query to run.\n",
|
||||
"You are a \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m expert. Given an input question, create a syntactically correct \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m query to run.\n",
|
||||
"Unless the user specifies in the question a specific number of examples to obtain, query for at most \u001b[33;1m\u001b[1;3m{top_k}\u001b[0m results using the LIMIT clause as per \u001b[33;1m\u001b[1;3m{dialect}\u001b[0m. You can order the results to return the most informative data in the database.\n",
|
||||
"Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (\") to denote them as delimited identifiers.\n",
|
||||
"Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n",
|
||||
@@ -275,7 +275,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system = \"\"\"You are a {dialect} expert. Given an input question, creat a syntactically correct {dialect} query to run.\n",
|
||||
"system = \"\"\"You are a {dialect} expert. Given an input question, create a syntactically correct {dialect} query to run.\n",
|
||||
"Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per {dialect}. You can order the results to return the most informative data in the database.\n",
|
||||
"Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (\") to denote them as delimited identifiers.\n",
|
||||
"Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n",
|
||||
|
||||
@@ -41,6 +41,10 @@
|
||||
"\n",
|
||||
"Let's take a look at both approaches, and try to understand how to use them.\n",
|
||||
"\n",
|
||||
":::info\n",
|
||||
"For a higher-level overview of streaming techniques in LangChain, see [this section of the conceptual guide](/docs/concepts/#streaming).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Using Stream\n",
|
||||
"\n",
|
||||
"All `Runnable` objects implement a sync method called `stream` and an async variant called `astream`. \n",
|
||||
@@ -1003,7 +1007,7 @@
|
||||
"id": "798ea891-997c-454c-bf60-43124f40ee1b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Because both the model and the parser support streaming, we see sreaming events from both components in real time! Kind of cool isn't it? 🦜"
|
||||
"Because both the model and the parser support streaming, we see streaming events from both components in real time! Kind of cool isn't it? 🦜"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"id": "6d55008f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -76,22 +76,22 @@
|
||||
"id": "a808a401-be1f-49f9-ad13-58dd68f7db5f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want the model to return a Pydantic object, we just need to pass in desired the Pydantic class:"
|
||||
"If we want the model to return a Pydantic object, we just need to pass in the desired Pydantic class:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"execution_count": 3,
|
||||
"id": "070bf702",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)"
|
||||
"Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=8)"
|
||||
]
|
||||
},
|
||||
"execution_count": 38,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -250,7 +250,7 @@
|
||||
"id": "e28c14d3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, you can use tool calling directly to allow the model to choose between options, if your [chosen model supports it](/docs/integrations/chat/). This involves a bit more parsing and setup but in some instances leads to better performance because you don't have to use nested schemas. See [this how-to guide](/docs/how_to/tool_calling/) for more details."
|
||||
"Alternatively, you can use tool calling directly to allow the model to choose between options, if your [chosen model supports it](/docs/integrations/chat/). This involves a bit more parsing and setup but in some instances leads to better performance because you don't have to use nested schemas. See [this how-to guide](/docs/how_to/tool_calling) for more details."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -514,12 +514,49 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "91e95aa2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### (Advanced) Raw outputs\n",
|
||||
"\n",
|
||||
"LLMs aren't perfect at generating structured output, especially as schemas become complex. You can avoid raising exceptions and handle the raw output yourself by passing `include_raw=True`. This changes the output format to contain the raw message output, the `parsed` value (if successful), and any resulting errors:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "10ed2842",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ASK4EmZeZ69Fi3p554Mb4rWy', 'function': {'arguments': '{\"setup\":\"Why was the cat sitting on the computer?\",\"punchline\":\"Because it wanted to keep an eye on the mouse!\"}', 'name': 'Joke'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 36, 'prompt_tokens': 107, 'total_tokens': 143}, 'model_name': 'gpt-4-0125-preview', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-6491d35b-9164-4656-b75c-d7882cfb76cb-0', tool_calls=[{'name': 'Joke', 'args': {'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on the mouse!'}, 'id': 'call_ASK4EmZeZ69Fi3p554Mb4rWy'}], usage_metadata={'input_tokens': 107, 'output_tokens': 36, 'total_tokens': 143}),\n",
|
||||
" 'parsed': Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=None),\n",
|
||||
" 'parsing_error': None}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"structured_llm = llm.with_structured_output(Joke, include_raw=True)\n",
|
||||
"\n",
|
||||
"structured_llm.invoke(\n",
|
||||
" \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5e92a98a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prompting and parsing model directly\n",
|
||||
"## Prompting and parsing model outputs directly\n",
|
||||
"\n",
|
||||
"Not all models support `.with_structured_output()`, since not all models have tool calling or JSON mode support. For such models you'll need to directly prompt the model to use a specific format, and use an output parser to extract the structured response from the raw model output.\n",
|
||||
"\n",
|
||||
@@ -787,9 +824,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -801,7 +838,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
396
docs/docs/how_to/tool_artifacts.ipynb
Normal file
@@ -0,0 +1,396 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "503e36ae-ca62-4f8a-880c-4fe78ff5df93",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to return artifacts from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [ToolMessage](/docs/concepts/#toolmessage)\n",
|
||||
"- [Tools](/docs/concepts/#tools)\n",
|
||||
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Tools are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns a custom object, a dataframe or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
|
||||
"\n",
|
||||
"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-core >= 0.2.19``\n",
|
||||
"\n",
|
||||
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Defining the tool\n",
|
||||
"\n",
|
||||
"If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format=\"content_and_artifact\"` when defining our tool and make sure that we return a tuple of (content, artifact):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "762b9199-885f-4946-9c98-cc54d72b0d76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU \"langchain-core>=0.2.19\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b9eb179d-1f41-4748-9866-b3d3e8c73cd0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import random\n",
|
||||
"from typing import List, Tuple\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(response_format=\"content_and_artifact\")\n",
|
||||
"def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:\n",
|
||||
" \"\"\"Generate size random ints in the range [min, max].\"\"\"\n",
|
||||
" array = [random.randint(min, max) for _ in range(size)]\n",
|
||||
" content = f\"Successfully generated array of {size} random ints in [{min}, {max}].\"\n",
|
||||
" return content, array"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ab05d25-af4a-4e5a-afe2-f090416d7ee7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invoking the tool with ToolCall\n",
|
||||
"\n",
|
||||
"If we directly invoke our tool with just the tool arguments, you'll notice that we only get back the content part of the Tool output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5e7d5e77-3102-4a59-8ade-e4e699dd1817",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Successfully generated array of 10 random ints in [0, 9].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke({\"min\": 0, \"max\": 9, \"size\": 10})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "30db7228-f04c-489e-afda-9a572eaa90a1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to get back both the content and the artifact, we need to invoke our model with a ToolCall (which is just a dictionary with \"name\", \"args\", \"id\" and \"type\" keys), which has additional info needed to generate a ToolMessage like the tool call ID:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "da1d939d-a900-4b01-92aa-d19011a6b034",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[2, 8, 0, 6, 0, 0, 1, 5, 0, 0])"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_ints\",\n",
|
||||
" \"args\": {\"min\": 0, \"max\": 9, \"size\": 10},\n",
|
||||
" \"id\": \"123\", # required\n",
|
||||
" \"type\": \"tool_call\", # required\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a3cfc03d-020b-42c7-b0f8-c824af19e45e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using with a model\n",
|
||||
"\n",
|
||||
"With a [tool-calling model](/docs/how_to/tool_calling/), we can easily use a model to call our Tool and generate ToolMessages:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
"/>\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "74de0286-b003-4b48-9cdd-ecab435515ca",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | echo: false\n",
|
||||
"# | output: false\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "8a67424b-d19c-43df-ac7b-690bca42146c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'generate_random_ints',\n",
|
||||
" 'args': {'min': 1, 'max': 24, 'size': 6},\n",
|
||||
" 'id': 'toolu_01EtALY3Wz1DVYhv1TLvZGvE',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools([generate_random_ints])\n",
|
||||
"\n",
|
||||
"ai_msg = llm_with_tools.invoke(\"generate 6 positive ints less than 25\")\n",
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "00c4e906-3ca8-41e8-a0be-65cb0db7d574",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Successfully generated array of 6 random ints in [1, 24].', name='generate_random_ints', tool_call_id='toolu_01EtALY3Wz1DVYhv1TLvZGvE', artifact=[2, 20, 23, 8, 1, 15])"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(ai_msg.tool_calls[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ddef2690-70de-4542-ab20-2337f77f3e46",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we just pass in the tool call args, we'll only get back the content:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "f4a6c9a6-0ffc-4b0e-a59f-f3c3d69d824d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Successfully generated array of 6 random ints in [1, 24].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"generate_random_ints.invoke(ai_msg.tool_calls[0][\"args\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98d6443b-ff41-4d91-8523-b6274fc74ee5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we wanted to declaratively create a chain, we could do this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "eb55ec23-95a4-464e-b886-d9679bf3aaa2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[ToolMessage(content='Successfully generated array of 1 random ints in [1, 5].', name='generate_random_ints', tool_call_id='toolu_01FwYhnkwDPJPbKdGq4ng6uD', artifact=[5])]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from operator import attrgetter\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | attrgetter(\"tool_calls\") | generate_random_ints.map()\n",
|
||||
"\n",
|
||||
"chain.invoke(\"give me a random number between 1 and 5\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4df46be2-babb-4bfe-a641-91cd3d03ffaf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating from BaseTool class\n",
|
||||
"\n",
|
||||
"If you want to create a BaseTool object directly, instead of decorating a function with `@tool`, you can do so like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9a9129e1-6aee-4a10-ad57-62ef3bf0276c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GenerateRandomFloats(BaseTool):\n",
|
||||
" name: str = \"generate_random_floats\"\n",
|
||||
" description: str = \"Generate size random floats in the range [min, max].\"\n",
|
||||
" response_format: str = \"content_and_artifact\"\n",
|
||||
"\n",
|
||||
" ndigits: int = 2\n",
|
||||
"\n",
|
||||
" def _run(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" range_ = max - min\n",
|
||||
" array = [\n",
|
||||
" round(min + (range_ * random.random()), ndigits=self.ndigits)\n",
|
||||
" for _ in range(size)\n",
|
||||
" ]\n",
|
||||
" content = f\"Generated {size} floats in [{min}, {max}], rounded to {self.ndigits} decimals.\"\n",
|
||||
" return content, array\n",
|
||||
"\n",
|
||||
" # Optionally define an equivalent async method\n",
|
||||
"\n",
|
||||
" # async def _arun(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
|
||||
" # ..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d7322619-f420-4b29-8ee5-023e693d0179",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rand_gen = GenerateRandomFloats(ndigits=4)\n",
|
||||
"rand_gen.invoke({\"min\": 0.1, \"max\": 3.3333, \"size\": 3})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0892f277-23a6-4bb8-a0e9-59f533ac9750",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.5789, 2.464, 2.2719])"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rand_gen.invoke(\n",
|
||||
" {\n",
|
||||
" \"name\": \"generate_random_floats\",\n",
|
||||
" \"args\": {\"min\": 0.1, \"max\": 3.3333, \"size\": 3},\n",
|
||||
" \"id\": \"123\",\n",
|
||||
" \"type\": \"tool_call\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,16 +1,30 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [tool calling, tool call]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use a model to call tools\n",
|
||||
"# How to use chat models to call tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [Output parsers](/docs/concepts/#output-parsers)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
@@ -38,6 +52,12 @@
|
||||
"parameters matching the desired schema, then treat the generated output as your final \n",
|
||||
"result.\n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"If you only need formatted values, try the [.with_structured_output()](/docs/how_to/structured_output/#the-with_structured_output-method) chat model method as a simpler entrypoint.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"However, tool calling goes beyond [structured output](/docs/how_to/structured_output/)\n",
|
||||
"since you can pass responses from called tools back to the model to create longer interactions.\n",
|
||||
"For instance, given a search engine tool, an LLM might handle a \n",
|
||||
@@ -52,35 +72,34 @@
|
||||
"support variants of a tool calling feature.\n",
|
||||
"\n",
|
||||
"LangChain implements standard interfaces for defining tools, passing them to LLMs, \n",
|
||||
"and representing tool calls. This guide will show you how to use them.\n",
|
||||
"\n",
|
||||
"and representing tool calls. This guide and the other How-to pages in the Tool section will show you how to use tools with LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Passing tools to chat models\n",
|
||||
"\n",
|
||||
"Chat models that support tool calling features implement a `.bind_tools` method, which \n",
|
||||
"receives a list of LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
|
||||
"receives a list of functions, Pydantic models, or LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
|
||||
"and binds them to the chat model in its expected format. Subsequent invocations of the \n",
|
||||
"chat model will include tool schemas in its calls to the LLM.\n",
|
||||
"\n",
|
||||
"For example, we can define the schema for custom tools using the `@tool` decorator \n",
|
||||
"on Python functions:"
|
||||
"For example, below we implement simple tools for arithmetic:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
@@ -93,12 +112,14 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Or below, we define the schema using [Pydantic](https://docs.pydantic.dev):"
|
||||
"LangChain also implements a `@tool` decorator that allows for further control of the tool schema, such as tool names and argument descriptions. See the how-to guide [here](/docs/how_to/custom_tools/#creating-tools-from-functions) for detail.\n",
|
||||
"\n",
|
||||
"We can also define the schema using [Pydantic](https://docs.pydantic.dev):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -153,7 +174,7 @@
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"%pip install -qU langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
@@ -167,81 +188,33 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also use the `tool_choice` parameter to ensure certain behavior. For example, we can force our tool to call the multiply tool by using the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_g4RuAijtDcSeM96jXyCuiLSN', 'function': {'arguments': '{\"a\":3,\"b\":12}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 95, 'total_tokens': 113}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-5157d15a-7e0e-4ab1-af48-3d98010cd152-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_g4RuAijtDcSeM96jXyCuiLSN'}], usage_metadata={'input_tokens': 95, 'output_tokens': 18, 'total_tokens': 113})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_multiply = llm.bind_tools(tools, tool_choice=\"Multiply\")\n",
|
||||
"llm_forced_to_multiply.invoke(\"what is 2 + 4\")"
|
||||
"llm_with_tools = llm.bind_tools(tools)\n",
|
||||
"\n",
|
||||
"query = \"What is 3 * 12?\"\n",
|
||||
"\n",
|
||||
"llm_with_tools.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also just force our tool to select at least one of our tools by passing in the \"any\" (or \"required\" which is OpenAI specific) keyword to the `tool_choice` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_use_tool = llm.bind_tools(tools, tool_choice=\"any\")\n",
|
||||
"llm_forced_to_use_tool.invoke(\"What day is today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, even though the prompt didn't really suggest a tool call, our LLM made one since it was forced to do so. You can look at the docs for [`bind_tool`](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools) to learn about all the ways to customize how your LLM selects tools."
|
||||
"As we can see, even though the prompt didn't really suggest a tool call, our LLM made one since it was forced to do so. You can look at the docs for [bind_tools()](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.BaseChatOpenAI.html#langchain_openai.chat_models.base.BaseChatOpenAI.bind_tools) to learn about all the ways to customize how your LLM selects tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -273,10 +246,10 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 3, 'b': 12},\n",
|
||||
" 'id': 'call_KquHA7mSbgtAkpkmRPaFnJKa'},\n",
|
||||
" 'id': 'call_TnadLbWJu9HwDULRb51RNSMw'},\n",
|
||||
" {'name': 'Add',\n",
|
||||
" 'args': {'a': 11, 'b': 49},\n",
|
||||
" 'id': 'call_Fl0hQi4IBTzlpaJYlM5kPQhE'}]"
|
||||
" 'id': 'call_Q9vt1up05sOQScXvUYWzSpCg'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@@ -302,7 +275,8 @@
|
||||
"a name, string arguments, identifier, and error message.\n",
|
||||
"\n",
|
||||
"If desired, [output parsers](/docs/how_to#output-parsers) can further \n",
|
||||
"process the output. For example, we can convert back to the original Pydantic class:"
|
||||
"process the output. For example, we can convert existing values populated on the `.tool_calls` attribute back to the original Pydantic class using the\n",
|
||||
"[PydanticToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.openai_tools.PydanticToolsParser.html):"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -322,443 +296,27 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain_core.output_parsers import PydanticToolsParser\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | PydanticToolsParser(tools=[Multiply, Add])\n",
|
||||
"chain.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"When tools are called in a streaming context, \n",
|
||||
"[message chunks](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"will be populated with [tool call chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
|
||||
"objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n",
|
||||
"optional string fields for the tool `name`, `args`, and `id`, and includes an optional \n",
|
||||
"integer field `index` that can be used to join chunks together. Fields are optional \n",
|
||||
"because portions of a tool call may be streamed across different chunks (e.g., a chunk \n",
|
||||
"that includes a substring of the arguments may have null values for the tool name and id).\n",
|
||||
"\n",
|
||||
"Because message chunks inherit from their parent message class, an \n",
|
||||
"[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
|
||||
"with tool call chunks will also include `.tool_calls` and `.invalid_tool_calls` fields. \n",
|
||||
"These fields are parsed best-effort from the message's tool call chunks.\n",
|
||||
"\n",
|
||||
"Note that not all providers currently support streaming for tool calls:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_3aQwTP9CYlFxwOvQZPHDu6wL', 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': ': 3, ', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '\"b\": 1', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': None, 'args': '2}', 'id': None, 'index': 0}]\n",
|
||||
"[{'name': 'Add', 'args': '', 'id': 'call_SQUoSsJz2p9Kx2x73GOgN1ja', 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ': 11,', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': ' \"b\": ', 'id': None, 'index': 1}]\n",
|
||||
"[{'name': None, 'args': '49}', 'id': None, 'index': 1}]\n",
|
||||
"[]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" print(chunk.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that adding message chunks will merge their corresponding tool call chunks. This is the principle by which LangChain's various [tool output parsers](/docs/how_to/output_parser_structured) support streaming.\n",
|
||||
"\n",
|
||||
"For example, below we accumulate tool call chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': '', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\"', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, ', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 1', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\"', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11,', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": ', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
|
||||
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_call_chunks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'str'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_call_chunks[0][\"args\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And below we accumulate tool calls to demonstrate partial parsing:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[]\n",
|
||||
"[]\n",
|
||||
"[{'name': 'Multiply', 'args': {}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 1}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
|
||||
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"first = True\n",
|
||||
"async for chunk in llm_with_tools.astream(query):\n",
|
||||
" if first:\n",
|
||||
" gathered = chunk\n",
|
||||
" first = False\n",
|
||||
" else:\n",
|
||||
" gathered = gathered + chunk\n",
|
||||
"\n",
|
||||
" print(gathered.tool_calls)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<class 'dict'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(gathered.tool_calls[0][\"args\"]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Passing tool outputs to the model\n",
|
||||
"\n",
|
||||
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_svc2GLSxNFALbaCAbSjMI9J8', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'Multiply'}, 'type': 'function'}, {'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 50, 'prompt_tokens': 105, 'total_tokens': 155}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a79ad1dd-95f1-4a46-b688-4c83f327a7b3-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_svc2GLSxNFALbaCAbSjMI9J8'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh'}]),\n",
|
||||
" ToolMessage(content='36', tool_call_id='call_svc2GLSxNFALbaCAbSjMI9J8'),\n",
|
||||
" ToolMessage(content='60', tool_call_id='call_r8jxte3zW6h3MEGV3zH2qzFh')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, ToolMessage\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)\n",
|
||||
"messages.append(ai_msg)\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
|
||||
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
|
||||
"messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 171, 'total_tokens': 189}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}, id='run-20b52149-e00d-48ea-97cf-f8de7a255f8c-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we pass back the same `id` in the `ToolMessage` as the what we receive from the model in order to help the model match tool responses with tool calls.\n",
|
||||
"\n",
|
||||
"## Few-shot prompting\n",
|
||||
"\n",
|
||||
"For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n",
|
||||
"\n",
|
||||
"For example, even with some special instructions our model can get tripped up by order of operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_T88XN6ECucTgbXXkyDeC2CQj'},\n",
|
||||
" {'name': 'Add',\n",
|
||||
" 'args': {'a': 952, 'b': -20},\n",
|
||||
" 'id': 'call_licdlmGsRqzup8rhqJSb1yZ4'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(\n",
|
||||
" \"Whats 119 times 8 minus 20. Don't do any math yourself, only use tools for math. Respect order of operations\"\n",
|
||||
").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The model shouldn't be trying to add anything yet, since it technically can't know the results of 119 * 8 yet.\n",
|
||||
"\n",
|
||||
"By adding a prompt with some examples we can correct this behavior:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Multiply',\n",
|
||||
" 'args': {'a': 119, 'b': 8},\n",
|
||||
" 'id': 'call_9MvuwQqg7dlJupJcoTWiEsDo'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" HumanMessage(\n",
|
||||
" \"What's the product of 317253 and 128472 plus four\", name=\"example_user\"\n",
|
||||
" ),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[\n",
|
||||
" {\"name\": \"Multiply\", \"args\": {\"x\": 317253, \"y\": 128472}, \"id\": \"1\"}\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054784\", tool_call_id=\"1\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" tool_calls=[{\"name\": \"Add\", \"args\": {\"x\": 16505054784, \"y\": 4}, \"id\": \"2\"}],\n",
|
||||
" ),\n",
|
||||
" ToolMessage(\"16505054788\", tool_call_id=\"2\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"The product of 317253 and 128472 plus four is 16505054788\",\n",
|
||||
" name=\"example_assistant\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"system = \"\"\"You are bad at math but are an expert at using a calculator. \n",
|
||||
"\n",
|
||||
"Use past tool usage as an example of how to correctly use the tools.\"\"\"\n",
|
||||
"few_shot_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", system),\n",
|
||||
" *examples,\n",
|
||||
" (\"human\", \"{query}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = {\"query\": RunnablePassthrough()} | few_shot_prompt | llm_with_tools\n",
|
||||
"chain.invoke(\"Whats 119 times 8 minus 20\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we get the correct output this time.\n",
|
||||
"\n",
|
||||
"Here's what the [LangSmith trace](https://smith.langchain.com/public/f70550a1-585f-4c9d-a643-13148ab1616f/r) looks like."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Binding model-specific formats (advanced)\n",
|
||||
"\n",
|
||||
"Providers adopt different conventions for formatting tool schemas. \n",
|
||||
"For instance, OpenAI uses a format like this:\n",
|
||||
"\n",
|
||||
"- `type`: The type of the tool. At the time of writing, this is always `\"function\"`.\n",
|
||||
"- `function`: An object containing tool parameters.\n",
|
||||
"- `function.name`: The name of the schema to output.\n",
|
||||
"- `function.description`: A high level description of the schema to output.\n",
|
||||
"- `function.parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n",
|
||||
"\n",
|
||||
"We can bind this model-specific format directly to the model as well if preferred. Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe', 'function': {'arguments': '{\"a\":119,\"b\":8}', 'name': 'multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 62, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_c2295e73ad', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-353e8a9a-7125-4f94-8c68-4f3da4c21120-0', tool_calls=[{'name': 'multiply', 'args': {'a': 119, 'b': 8}, 'id': 'call_mn4ELw1NbuE0DFYhIeK0GrPe'}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"model_with_tools = model.bind(\n",
|
||||
" tools=[\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"multiply\",\n",
|
||||
" \"description\": \"Multiply two integers together.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"a\": {\"type\": \"number\", \"description\": \"First integer\"},\n",
|
||||
" \"b\": {\"type\": \"number\", \"description\": \"Second integer\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"a\", \"b\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model_with_tools.invoke(\"Whats 119 times 8?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is functionally equivalent to the `bind_tools()` calls above."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"Now you've learned how to bind tool schemas to a chat model and to call those tools. Next, check out some more specific uses of tool calling:\n",
|
||||
"Now you've learned how to bind tool schemas to a chat model and to call those tools. Next, you can learn more about how to use tools:\n",
|
||||
"\n",
|
||||
"- Few shot promting [with tools](/docs/how_to/tools_few_shot/)\n",
|
||||
"- Stream [tool calls](/docs/how_to/tool_streaming/)\n",
|
||||
"- Bind [model-specific tools](/docs/how_to/tools_model_specific/)\n",
|
||||
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
|
||||
"- Pass [tool results back to model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
|
||||
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
|
||||
@@ -781,7 +339,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
128
docs/docs/how_to/tool_calling_parallel.ipynb
Normal file
@@ -0,0 +1,128 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to disable parallel tool calling\n",
|
||||
"\n",
|
||||
":::info OpenAI-specific\n",
|
||||
"\n",
|
||||
"This API is currently only supported by OpenAI.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"OpenAI tool calling performs tool calling in parallel by default. That means that if we ask a question like \"What is the weather in Tokyo, New York, and Chicago?\" and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the ``parallel_tool_call`` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First let's set up our tools and model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's show a quick example of how disabling parallel tool calls work:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'add',\n",
|
||||
" 'args': {'a': 2, 'b': 2},\n",
|
||||
" 'id': 'call_Hh4JOTCDM85Sm9Pr84VKrWu5'}]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)\n",
|
||||
"llm_with_tools.invoke(\"Please call the first tool two times\").tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As we can see, even though we explicitly told the model to call a tool twice, by disabling parallel tool calls the model was constrained to only calling one."
|
||||
]
|
||||
},
|
||||
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
docs/docs/how_to/tool_choice.ipynb (Normal file, 148 lines)
@@ -0,0 +1,148 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to force models to call a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In order to force our LLM to spelect a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For example, we can force our tool to call the multiply tool by using the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_9cViskmLvPnHjXk9tbVla5HA', 'function': {'arguments': '{\"a\":2,\"b\":4}', 'name': 'Multiply'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 103, 'total_tokens': 112}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-095b827e-2bdd-43bb-8897-c843f4504883-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 2, 'b': 4}, 'id': 'call_9cViskmLvPnHjXk9tbVla5HA'}], usage_metadata={'input_tokens': 103, 'output_tokens': 9, 'total_tokens': 112})"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_multiply = llm.bind_tools(tools, tool_choice=\"Multiply\")\n",
|
||||
"llm_forced_to_multiply.invoke(\"what is 2 + 4\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Even if we pass it something that doesn't require multiplcation - it will still call the tool!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also just force our tool to select at least one of our tools by passing in the \"any\" (or \"required\" which is OpenAI specific) keyword to the `tool_choice` parameter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W', 'function': {'arguments': '{\"a\":1,\"b\":2}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 94, 'total_tokens': 109}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-28f75260-9900-4bed-8cd3-f1579abb65e5-0', tool_calls=[{'name': 'Add', 'args': {'a': 1, 'b': 2}, 'id': 'call_mCSiJntCwHJUBfaHZVUB2D8W'}], usage_metadata={'input_tokens': 94, 'output_tokens': 15, 'total_tokens': 109})"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_forced_to_use_tool = llm.bind_tools(tools, tool_choice=\"any\")\n",
|
||||
"llm_forced_to_use_tool.invoke(\"What day is today?\")"
|
||||
]
|
||||
},
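{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal sketch (not part of the original guide), you can execute whichever call the model was forced to make by looking the tool up by name and invoking it with the generated arguments:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical follow-up: run the forced tool call ourselves.\n",
"ai_msg = llm_forced_to_use_tool.invoke(\"What day is today?\")\n",
"tool_map = {\"add\": add, \"multiply\": multiply}\n",
"for tool_call in ai_msg.tool_calls:\n",
"    print(tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"]))"
]
}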
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
docs/docs/how_to/tool_configure.ipynb (Normal file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to access the RunnableConfig from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [Custom tools](/docs/how_to/custom_tools)\n",
|
||||
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language-lcel)\n",
|
||||
"- [Configuring runnable behavior](/docs/how_to/configure/)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If you have a tool that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
|
||||
"\n",
|
||||
"Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) object. This guide show you some examples of how to do that.\n",
|
||||
"\n",
|
||||
":::caution Compatibility\n",
|
||||
"\n",
|
||||
"This guide requires `langchain-core>=0.2.16`.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Inferring by parameter type\n",
|
||||
"\n",
|
||||
"To access reference the active config object from your custom tool, you'll need to add a parameter to your tool's signature typed as `RunnableConfig`. When you invoke your tool, LangChain will inspect your tool's signature, look for a parameter typed as `RunnableConfig`, and if it exists, populate that parameter with the correct value.\n",
|
||||
"\n",
|
||||
"**Note:** The actual name of the parameter doesn't matter, only the typing.\n",
|
||||
"\n",
|
||||
"To illustrate this, define a custom tool that takes a two parameters - one typed as a string, the other typed as `RunnableConfig`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_core"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"async def reverse_tool(text: str, special_config_param: RunnableConfig) -> str:\n",
|
||||
" \"\"\"A test tool that combines input text with a configurable parameter.\"\"\"\n",
|
||||
" return (text + special_config_param[\"configurable\"][\"additional_field\"])[::-1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Then, if we invoke the tool with a `config` containing a `configurable` field, we can see that `additional_field` is passed through correctly:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'321cba'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await reverse_tool.ainvoke(\n",
|
||||
" {\"text\": \"abc\"}, config={\"configurable\": {\"additional_field\": \"123\"}}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
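{
"cell_type": "markdown",
"metadata": {},
"source": [
"If the configurable field might be absent, a defensive variant (a sketch, not from the original guide) can read it with `.get` so the tool still works when no value is supplied:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"async def reverse_tool_with_default(text: str, config: RunnableConfig) -> str:\n",
"    \"\"\"Reverse text, appending an optional configurable suffix.\"\"\"\n",
"    # Fall back to an empty suffix if \"additional_field\" was not configured.\n",
"    suffix = config.get(\"configurable\", {}).get(\"additional_field\", \"\")\n",
"    return (text + suffix)[::-1]\n",
"\n",
"\n",
"await reverse_tool_with_default.ainvoke({\"text\": \"abc\"})"
]
},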
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now seen how to configure and stream events from within a tool. Next, check out the following guides for more on using tools:\n",
|
||||
"\n",
|
||||
"- [Stream events from child runs within a custom tool](/docs/how_to/tool_stream_events/)\n",
|
||||
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
|
||||
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
docs/docs/how_to/tool_results_pass_to_model.ipynb (Normal file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass tool outputs to chat models\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Tools](/docs/concepts/#tools)\n",
|
||||
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s and `ToolCall`s. First, let's define our tools and our model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def add(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Adds a and b.\"\"\"\n",
|
||||
" return a + b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiplies a and b.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tools = [add, multiply]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The nice thing about Tools is that if we invoke them with a ToolCall, we'll automatically get back a ToolMessage that can be fed back to the model: \n",
|
||||
"\n",
|
||||
":::info Requires ``langchain-core >= 0.2.19``\n",
|
||||
"\n",
|
||||
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
|
||||
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Smg3NHJNxrKfAmd4f9GkaYn3', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'multiply'}, 'type': 'function'}, {'id': 'call_55K1C0DmH6U5qh810gW34xZ0', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 49, 'prompt_tokens': 88, 'total_tokens': 137}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-56657feb-96dd-456c-ab8e-1857eab2ade0-0', tool_calls=[{'name': 'multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_Smg3NHJNxrKfAmd4f9GkaYn3', 'type': 'tool_call'}, {'name': 'add', 'args': {'a': 11, 'b': 49}, 'id': 'call_55K1C0DmH6U5qh810gW34xZ0', 'type': 'tool_call'}], usage_metadata={'input_tokens': 88, 'output_tokens': 49, 'total_tokens': 137}),\n",
|
||||
" ToolMessage(content='36', name='multiply', tool_call_id='call_Smg3NHJNxrKfAmd4f9GkaYn3'),\n",
|
||||
" ToolMessage(content='60', name='add', tool_call_id='call_55K1C0DmH6U5qh810gW34xZ0')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, ToolMessage\n",
|
||||
"\n",
|
||||
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(query)]\n",
|
||||
"ai_msg = llm_with_tools.invoke(messages)\n",
|
||||
"messages.append(ai_msg)\n",
|
||||
"for tool_call in ai_msg.tool_calls:\n",
|
||||
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
|
||||
" tool_msg = selected_tool.invoke(tool_call)\n",
|
||||
" messages.append(tool_msg)\n",
|
||||
"messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 153, 'total_tokens': 171}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-ba5032f0-f773-406d-a408-8314e66511d0-0', usage_metadata={'input_tokens': 153, 'output_tokens': 18, 'total_tokens': 171})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_with_tools.invoke(messages)"
|
||||
]
|
||||
},
|
||||
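{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you execute a tool yourself instead of invoking it with the `ToolCall`, you can construct the `ToolMessage` manually. This is a minimal sketch; the key point (see the note below) is to reuse the `id` from the corresponding tool call:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical manual execution of the first tool call:\n",
"tool_call = ai_msg.tool_calls[0]\n",
"selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"]]\n",
"result = selected_tool.invoke(tool_call[\"args\"])\n",
"# Reuse the tool call's id so the model can match this result to its request.\n",
"ToolMessage(content=str(result), tool_call_id=tool_call[\"id\"])"
]
},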
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we pass back the same `id` in the `ToolMessage` as the what we receive from the model in order to help the model match tool responses with tool calls."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass run time values to a tool\n",
|
||||
"# How to pass run time values to tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
@@ -12,29 +12,28 @@
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to create tools](/docs/how_to/custom_tools)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/)\n",
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
"\n",
|
||||
"This how-to guide uses models with native tool calling capability.\n",
|
||||
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Using with LangGraph\n",
|
||||
":::info Using with LangGraph\n",
|
||||
"\n",
|
||||
"If you're using LangGraph, please refer to [this how-to guide](https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/)\n",
|
||||
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::caution Added in `langchain-core==0.2.21`\n",
|
||||
"\n",
|
||||
"Must have `langchain-core>=0.2.21` to use this functionality.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
|
||||
"\n",
|
||||
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
|
||||
"\n",
|
||||
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
|
||||
"\n",
|
||||
"This how-to guide shows a simple design pattern that creates the tool dynamically at run time and binds to them appropriate values."
|
||||
"This how-to guide shows you how to prevent the model from generating certain tool arguments and injecting them in directly at runtime."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -57,23 +56,12 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"# %pip install -qU langchain langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
@@ -90,10 +78,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Passing request time information\n",
|
||||
"## Hiding arguments from the model\n",
|
||||
"\n",
|
||||
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
|
||||
"this information may be the user ID as resolved from the request itself."
|
||||
"We can use the InjectedToolArg annotation to mark certain parameters of our Tool, like `user_id` as being injected at runtime, meaning they shouldn't be generated by the model"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -104,46 +91,88 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_core.tools import BaseTool, tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import InjectedToolArg, tool\n",
|
||||
"from typing_extensions import Annotated\n",
|
||||
"\n",
|
||||
"user_to_pets = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_tools_for_user(user_id: str) -> List[BaseTool]:\n",
|
||||
" \"\"\"Generate a set of tools that have a user id associated with them.\"\"\"\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def update_favorite_pets(\n",
|
||||
" pets: List[str], user_id: Annotated[str, InjectedToolArg]\n",
|
||||
") -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def update_favorite_pets(pets: List[str]) -> None:\n",
|
||||
" \"\"\"Add the list of favorite pets.\"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
" Args:\n",
|
||||
" pets: List of favorite pets to set.\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def delete_favorite_pets() -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def list_favorite_pets() -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def delete_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" \"\"\"Delete the list of favorite pets.\n",
|
||||
"\n",
|
||||
" return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
|
||||
" Args:\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" if user_id in user_to_pets:\n",
|
||||
" del user_to_pets[user_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(parse_docstring=True)\n",
|
||||
"def list_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id: User's ID.\n",
|
||||
" \"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Verify that the tools work correctly"
|
||||
"If we look at the input schemas for these tools, we'll see that user_id is still listed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_petsSchema',\n",
|
||||
" 'description': 'Add the list of favorite pets.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_favorite_pets.get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But if we look at the tool call schema, which is what is passed to the model for tool-calling, user_id has been removed:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -152,46 +181,60 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'eugene': ['cat', 'dog']}\n",
|
||||
"['cat', 'dog']\n"
|
||||
]
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Add the list of favorite pets.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_pets, delete_pets, list_pets = generate_tools_for_user(\"eugene\")\n",
|
||||
"update_pets.invoke({\"pets\": [\"cat\", \"dog\"]})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_pets.invoke({}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def handle_run_time_request(user_id: str, query: str):\n",
|
||||
" \"\"\"Handle run time request.\"\"\"\n",
|
||||
" tools = generate_tools_for_user(user_id)\n",
|
||||
" llm_with_tools = llm.bind_tools(tools)\n",
|
||||
" prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", \"You are a helpful assistant.\")],\n",
|
||||
" )\n",
|
||||
" chain = prompt | llm_with_tools\n",
|
||||
" return llm_with_tools.invoke(query)"
|
||||
"update_favorite_pets.tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists!"
|
||||
"So when we invoke our tool, we need to pass in user_id:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'123': ['lizard', 'dog']}\n",
|
||||
"['lizard', 'dog']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"user_id = \"123\"\n",
|
||||
"update_favorite_pets.invoke({\"pets\": [\"lizard\", \"dog\"], \"user_id\": user_id})\n",
|
||||
"print(user_to_pets)\n",
|
||||
"print(list_favorite_pets.invoke({\"user_id\": user_id}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But when the model calls the tool, no user_id argument will be generated:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -204,7 +247,8 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots']},\n",
|
||||
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
|
||||
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
@@ -213,30 +257,349 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ai_message = handle_run_time_request(\n",
|
||||
" \"eugene\", \"my favorite animals are cats and parrots.\"\n",
|
||||
")\n",
|
||||
"ai_message.tool_calls"
|
||||
"tools = [\n",
|
||||
" update_favorite_pets,\n",
|
||||
" delete_favorite_pets,\n",
|
||||
" list_favorite_pets,\n",
|
||||
"]\n",
|
||||
"llm_with_tools = llm.bind_tools(tools)\n",
|
||||
"ai_msg = llm_with_tools.invoke(\"my favorite animals are cats and parrots\")\n",
|
||||
"ai_msg.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::{.callout-important}\n",
|
||||
"## Injecting arguments at runtime"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to actually execute our tools using the model-generated tool call, we'll need to inject the user_id ourselves:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots'], 'user_id': '123'},\n",
|
||||
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from copy import deepcopy\n",
|
||||
"\n",
|
||||
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
|
||||
"from langchain_core.runnables import chain\n",
|
||||
"\n",
|
||||
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling/).\n",
|
||||
":::"
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def inject_user_id(ai_msg):\n",
|
||||
" tool_calls = []\n",
|
||||
" for tool_call in ai_msg.tool_calls:\n",
|
||||
" tool_call_copy = deepcopy(tool_call)\n",
|
||||
" tool_call_copy[\"args\"][\"user_id\"] = user_id\n",
|
||||
" tool_calls.append(tool_call_copy)\n",
|
||||
" return tool_calls\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"inject_user_id.invoke(ai_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And now we can chain together our model, injection code, and the actual tools to create a tool-executing chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[ToolMessage(content='null', name='update_favorite_pets', tool_call_id='call_HUyF6AihqANzEYxQnTUKxkXj')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool_map = {tool.name: tool for tool in tools}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def tool_router(tool_call):\n",
|
||||
" return tool_map[tool_call[\"name\"]]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = llm_with_tools | inject_user_id | tool_router.map()\n",
|
||||
"chain.invoke(\"my favorite animals are cats and parrots\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the user_to_pets dict, we can see that it's been updated to include cats and parrots:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'123': ['cats', 'parrots']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"user_to_pets"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Other ways of annotating args\n",
|
||||
"\n",
|
||||
"Here are a few other ways of annotating our tool args:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'UpdateFavoritePetsSchema',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class UpdateFavoritePetsSchema(BaseModel):\n",
|
||||
" \"\"\"Update list of favorite pets\"\"\"\n",
|
||||
"\n",
|
||||
" pets: List[str] = Field(..., description=\"List of favorite pets to set.\")\n",
|
||||
" user_id: Annotated[str, InjectedToolArg] = Field(..., description=\"User's ID.\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(args_schema=UpdateFavoritePetsSchema)\n",
|
||||
"def update_favorite_pets(pets, user_id):\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"update_favorite_pets.get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"update_favorite_pets.tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'UpdateFavoritePetsSchema',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id',\n",
|
||||
" 'description': \"User's ID.\",\n",
|
||||
" 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class UpdateFavoritePets(BaseTool):\n",
|
||||
" name: str = \"update_favorite_pets\"\n",
|
||||
" description: str = \"Update list of favorite pets\"\n",
|
||||
" args_schema: Optional[Type[BaseModel]] = UpdateFavoritePetsSchema\n",
|
||||
"\n",
|
||||
" def _run(self, pets, user_id):\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"UpdateFavoritePets().get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'description': 'List of favorite pets to set.',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"UpdateFavoritePets().tool_call_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_petsSchema',\n",
|
||||
" 'description': 'Use the tool.\\n\\nAdd run_manager: Optional[CallbackManagerForToolRun] = None\\nto child implementations to enable tracing.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}},\n",
|
||||
" 'user_id': {'title': 'User Id', 'type': 'string'}},\n",
|
||||
" 'required': ['pets', 'user_id']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class UpdateFavoritePets2(BaseTool):\n",
|
||||
" name: str = \"update_favorite_pets\"\n",
|
||||
" description: str = \"Update list of favorite pets\"\n",
|
||||
"\n",
|
||||
" def _run(self, pets: List[str], user_id: Annotated[str, InjectedToolArg]) -> None:\n",
|
||||
" user_to_pets[user_id] = pets\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"UpdateFavoritePets2().get_input_schema().schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'update_favorite_pets',\n",
|
||||
" 'description': 'Update list of favorite pets',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'pets': {'title': 'Pets',\n",
|
||||
" 'type': 'array',\n",
|
||||
" 'items': {'type': 'string'}}},\n",
|
||||
" 'required': ['pets']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"UpdateFavoritePets2().tool_call_schema.schema()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "poetry-venv-311",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
"name": "poetry-venv-311"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -248,7 +611,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
docs/docs/how_to/tool_stream_events.ipynb (Normal file, 302 lines)
@@ -0,0 +1,302 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to stream events from a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [Custom tools](/docs/how_to/custom_tools)\n",
|
||||
"- [Using stream events](/docs/how_to/streaming/#using-stream-events)\n",
|
||||
"- [Accessing RunnableConfig within a custom tool](/docs/how_to/tool_configure/)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If you have tools that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
|
||||
"\n",
|
||||
":::caution Compatibility\n",
|
||||
"\n",
|
||||
"LangChain cannot automatically propagate configuration, including callbacks necessary for `astream_events()`, to child runnables if you are running `async` code in `python<=3.10`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
|
||||
"\n",
|
||||
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
|
||||
"\n",
|
||||
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in older Python versions.\n",
|
||||
"\n",
|
||||
"This guide also requires `langchain-core>=0.2.16`.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Say you have a custom tool that calls a chain that condenses its input by prompting a chat model to return only 10 words, then reversing the output. First, define it in a naive way:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"model\" />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_anthropic langchain_core\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"async def special_summarization_tool(long_text: str) -> str:\n",
|
||||
" \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
|
||||
" prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" def reverse(x: str):\n",
|
||||
" return x[::-1]\n",
|
||||
"\n",
|
||||
" chain = prompt | model | StrOutputParser() | reverse\n",
|
||||
" summary = await chain.ainvoke({\"long_text\": long_text})\n",
|
||||
" return summary"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Invoking the tool directly works just fine:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'.yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"LONG_TEXT = \"\"\"\n",
|
||||
"NARRATOR:\n",
|
||||
"(Black screen with text; The sound of buzzing bees can be heard)\n",
|
||||
"According to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\n",
|
||||
"BARRY BENSON:\n",
|
||||
"(Barry is picking out a shirt)\n",
|
||||
"Yellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\n",
|
||||
"JANET BENSON:\n",
|
||||
"Barry! Breakfast is ready!\n",
|
||||
"BARRY:\n",
|
||||
"Coming! Hang on a second.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"await special_summarization_tool.ainvoke({\"long_text\": LONG_TEXT})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But if you wanted to access the raw output from the chat model rather than the full tool, you might try to use the [`astream_events()`](/docs/how_to/streaming/#using-stream-events) method and look for an `on_chat_model_end` event. Here's what happens:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"stream = special_summarization_tool.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for event in stream:\n",
|
||||
" if event[\"event\"] == \"on_chat_model_end\":\n",
|
||||
" # Never triggers in python<=3.10!\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You'll notice (unless you're running through this guide in `python>=3.11`) that there are no chat model events emitted from the child run!\n",
|
||||
"\n",
|
||||
"This is because the example above does not pass the tool's config object into the internal chain. To fix this, redefine your tool to take a special parameter typed as `RunnableConfig` (see [this guide](/docs/how_to/tool_configure) for more details). You'll also need to pass that parameter through into the internal chain when executing it:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"async def special_summarization_tool_with_config(\n",
|
||||
" long_text: str, config: RunnableConfig\n",
|
||||
") -> str:\n",
|
||||
" \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
|
||||
" prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" def reverse(x: str):\n",
|
||||
" return x[::-1]\n",
|
||||
"\n",
|
||||
" chain = prompt | model | StrOutputParser() | reverse\n",
|
||||
" # Pass the \"config\" object as an argument to any executed runnables\n",
|
||||
" summary = await chain.ainvoke({\"long_text\": long_text}, config=config)\n",
|
||||
" return summary"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And now try the same `astream_events()` call as before with your new tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chat_model_end', 'data': {'output': AIMessage(content='Bee defies physics; Barry chooses outfit for graduation day.', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-d23abc80-0dce-4f74-9d7b-fb98ca4f2a9e', usage_metadata={'input_tokens': 182, 'output_tokens': 16, 'total_tokens': 198}), 'input': {'messages': [[HumanMessage(content=\"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n\\nNARRATOR:\\n(Black screen with text; The sound of buzzing bees can be heard)\\nAccording to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\\nBARRY BENSON:\\n(Barry is picking out a shirt)\\nYellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\\nJANET BENSON:\\nBarry! Breakfast is ready!\\nBARRY:\\nComing! Hang on a second.\\n\")]]}}, 'run_id': 'd23abc80-0dce-4f74-9d7b-fb98ca4f2a9e', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['f25c41fe-8972-4893-bc40-cecf3922c1fa']}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"stream = special_summarization_tool_with_config.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for event in stream:\n",
|
||||
" if event[\"event\"] == \"on_chat_model_end\":\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Awesome! This time there's an event emitted.\n",
|
||||
"\n",
|
||||
"For streaming, `astream_events()` automatically calls internal runnables in a chain with streaming enabled if possible, so if you wanted to a stream of tokens as they are generated from the chat model, you could simply filter to look for `on_chat_model_stream` events with no other changes:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"stream = special_summarization_tool_with_config.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for event in stream:\n",
|
||||
" if event[\"event\"] == \"on_chat_model_stream\":\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
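{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a small optional sketch (not part of the original guide), you can also accumulate the streamed chunks into a single message, since `AIMessageChunk` objects support concatenation with `+`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"full = None\n",
"stream = special_summarization_tool_with_config.astream_events(\n",
"    {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
"async for event in stream:\n",
"    if event[\"event\"] == \"on_chat_model_stream\":\n",
"        chunk = event[\"data\"][\"chunk\"]\n",
"        # AIMessageChunk supports \"+\" for merging partial messages.\n",
"        full = chunk if full is None else full + chunk\n",
"\n",
"print(full.content)"
]
},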
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now seen how to stream events from within a tool. Next, check out the following guides for more on using tools:\n",
|
||||
"\n",
|
||||
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
|
||||
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"- [Dispatch custom callback events](/docs/how_to/callbacks_custom_events)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
|
||||
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
235
docs/docs/how_to/tool_streaming.ipynb
Normal file
@@ -0,0 +1,235 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to stream tool calls\n",
"\n",
"When tools are called in a streaming context, \n",
"[message chunks](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"will be populated with [tool call chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
"objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n",
"optional string fields for the tool `name`, `args`, and `id`, and includes an optional \n",
"integer field `index` that can be used to join chunks together. Fields are optional \n",
"because portions of a tool call may be streamed across different chunks (e.g., a chunk \n",
"that includes a substring of the arguments may have null values for the tool name and id).\n",
"\n",
"Because message chunks inherit from their parent message class, an \n",
"[AIMessageChunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"with tool call chunks will also include `.tool_calls` and `.invalid_tool_calls` fields. \n",
"These fields are parsed best-effort from the message's tool call chunks.\n",
"\n",
"Note that not all providers currently support streaming for tool calls. Before we start, let's define our tools and our model."
]
},
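To make the role of `index` concrete, here is a small illustrative sketch (plain dicts, no provider calls; the sample chunk values are hypothetical) of how partial chunks with null `name`/`id` fields can be joined back into complete tool calls:

chunks = [
    {"name": "multiply", "args": "", "id": "call_1", "index": 0},
    {"name": None, "args": '{"a"', "id": None, "index": 0},
    {"name": None, "args": ': 3, "b": 12}', "id": None, "index": 0},
]

joined: dict = {}
for chunk in chunks:
    entry = joined.setdefault(chunk["index"], {"name": None, "args": "", "id": None})
    # `name` and `id` typically arrive once; keep the first non-null value.
    entry["name"] = entry["name"] or chunk["name"]
    entry["id"] = entry["id"] or chunk["id"]
    # `args` substrings concatenate in arrival order.
    entry["args"] += chunk["args"] or ""

print(joined)
# {0: {'name': 'multiply', 'args': '{"a": 3, "b": 12}', 'id': 'call_1'}}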
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def add(a: int, b: int) -> int:\n",
" \"\"\"Adds a and b.\"\"\"\n",
" return a + b\n",
"\n",
"\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiplies a and b.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"tools = [add, multiply]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm_with_tools = llm.bind_tools(tools)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now let's define our query and stream our output:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[]\n",
"[{'name': 'Multiply', 'args': '', 'id': 'call_3aQwTP9CYlFxwOvQZPHDu6wL', 'index': 0}]\n",
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 0}]\n",
"[{'name': None, 'args': ': 3, ', 'id': None, 'index': 0}]\n",
"[{'name': None, 'args': '\"b\": 1', 'id': None, 'index': 0}]\n",
"[{'name': None, 'args': '2}', 'id': None, 'index': 0}]\n",
"[{'name': 'Add', 'args': '', 'id': 'call_SQUoSsJz2p9Kx2x73GOgN1ja', 'index': 1}]\n",
"[{'name': None, 'args': '{\"a\"', 'id': None, 'index': 1}]\n",
"[{'name': None, 'args': ': 11,', 'id': None, 'index': 1}]\n",
"[{'name': None, 'args': ' \"b\": ', 'id': None, 'index': 1}]\n",
"[{'name': None, 'args': '49}', 'id': None, 'index': 1}]\n",
"[]\n"
]
}
],
"source": [
"query = \"What is 3 * 12? Also, what is 11 + 49?\"\n",
"\n",
"async for chunk in llm_with_tools.astream(query):\n",
" print(chunk.tool_call_chunks)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that adding message chunks will merge their corresponding tool call chunks. This is the principle by which LangChain's various [tool output parsers](/docs/how_to/output_parser_structured) support streaming.\n",
"\n",
"For example, below we accumulate tool call chunks:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[]\n",
"[{'name': 'Multiply', 'args': '', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\"', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, ', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 1', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\"', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11,', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": ', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n",
"[{'name': 'Multiply', 'args': '{\"a\": 3, \"b\": 12}', 'id': 'call_AkL3dVeCjjiqvjv8ckLxL3gP', 'index': 0}, {'name': 'Add', 'args': '{\"a\": 11, \"b\": 49}', 'id': 'call_b4iMiB3chGNGqbt5SjqqD2Wh', 'index': 1}]\n"
]
}
],
"source": [
"first = True\n",
"async for chunk in llm_with_tools.astream(query):\n",
" if first:\n",
" gathered = chunk\n",
" first = False\n",
" else:\n",
" gathered = gathered + chunk\n",
"\n",
" print(gathered.tool_call_chunks)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'str'>\n"
]
}
],
"source": [
"print(type(gathered.tool_call_chunks[0][\"args\"]))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And below we accumulate tool calls to demonstrate partial parsing:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[]\n",
"[]\n",
"[{'name': 'Multiply', 'args': {}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 1}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n",
"[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_4p0D4tHVXSiae9Mu0e8jlI1m'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_54Hx3DGjZitFlEjgMe1DYonh'}]\n"
]
}
],
"source": [
"first = True\n",
"async for chunk in llm_with_tools.astream(query):\n",
" if first:\n",
" gathered = chunk\n",
" first = False\n",
" else:\n",
" gathered = gathered + chunk\n",
"\n",
" print(gathered.tool_calls)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'dict'>\n"
]
}
],
"source": [
"print(type(gathered.tool_calls[0][\"args\"]))"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -228,7 +228,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.11.9"
}
},
"nbformat": 4,
@@ -419,13 +419,13 @@
"Invoking: `exponentiate` with `{'base': 405, 'exponent': 2}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[38;5;200m\u001b[1;3m164025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of taking 3 to the fifth power is 243. \n",
|
||||
"\u001b[0m\u001b[38;5;200m\u001b[1;3m13286025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of taking 3 to the fifth power is 243. \n",
|
||||
"\n",
|
||||
"The sum of twelve and three is 15. \n",
|
||||
"\n",
|
||||
"Multiplying 243 by 15 gives 3645. \n",
|
||||
"\n",
|
||||
"Finally, squaring 3645 gives 164025.\u001b[0m\n",
|
||||
"Finally, squaring 3645 gives 13286025.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
@@ -434,7 +434,7 @@
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'Take 3 to the fifth power and multiply that by the sum of twelve and three, then square the whole result',\n",
|
||||
" 'output': 'The result of taking 3 to the fifth power is 243. \\n\\nThe sum of twelve and three is 15. \\n\\nMultiplying 243 by 15 gives 3645. \\n\\nFinally, squaring 3645 gives 164025.'}"
|
||||
" 'output': 'The result of taking 3 to the fifth power is 243. \\n\\nThe sum of twelve and three is 15. \\n\\nMultiplying 243 by 15 gives 3645. \\n\\nFinally, squaring 3645 gives 13286025.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
|
||||
@@ -7,9 +7,18 @@
|
||||
"source": [
|
||||
"# How to handle tool errors\n",
|
||||
"\n",
|
||||
"Using a model to invoke a tool has some obvious potential failure modes. Firstly, the model needs to return a output that can be parsed at all. Secondly, the model needs to return tool arguments that are valid.\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"We can build error handling into our chains to mitigate these failure modes."
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Calling tools with an LLM is generally more reliable than pure prompting, but it isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n",
|
||||
"\n",
|
||||
"This guide covers some ways to build error handling into your chains to mitigate these failure modes."
|
||||
]
|
||||
},
{
@@ -42,7 +51,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "08785b6d-722d-4620-b6ec-36deb3842c69",
"metadata": {},
"outputs": [],
@@ -72,7 +81,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 4,
"id": "86258950-5e61-4340-81b9-84a5d26e8773",
"metadata": {},
"outputs": [],
@@ -82,12 +91,14 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 5,
"id": "1d20604e-c4d1-4d21-841b-23e4f61aec36",
"metadata": {},
"outputs": [],
@@ -99,28 +110,13 @@
"@tool\n",
"def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:\n",
" \"\"\"Do something complex with a complex tool.\"\"\"\n",
" return int_arg * float_arg"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "553c2c13-28c8-4451-8a3a-6c31d52dc31d",
"metadata": {},
"outputs": [],
"source": [
" return int_arg * float_arg\n",
"\n",
"\n",
"llm_with_tools = llm.bind_tools(\n",
" [complex_tool],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "802b2eca-9f79-4d6c-8257-85139ca5c752",
"metadata": {},
"outputs": [],
"source": [
")\n",
"\n",
"# Define chain\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool"
]
@@ -135,7 +131,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 6,
"id": "d354664c-ac44-4967-a35f-8912b3ad9477",
"metadata": {},
"outputs": [
@@ -146,14 +142,14 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:241\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 237\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 238\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 239\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 240\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 241\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 242\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 243\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 244\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 245\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 246\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:387\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 387\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 388\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 389\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:378\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 364\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_tool_start(\n\u001b[1;32m 365\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdescription\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdescription},\n\u001b[1;32m 366\u001b[0m tool_input \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tool_input, \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mstr\u001b[39m(tool_input),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 375\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 376\u001b[0m )\n\u001b[1;32m 377\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 378\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 379\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 380\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 381\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 382\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 383\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 384\u001b[0m )\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:283\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 283\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 285\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 286\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 288\u001b[0m }\n\u001b[1;32m 289\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:526\u001b[0m, in \u001b[0;36mBaseModel.parse_obj\u001b[0;34m(cls, obj)\u001b[0m\n\u001b[1;32m 524\u001b[0m exc \u001b[38;5;241m=\u001b[39m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m expected dict not \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mobj\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 525\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ValidationError([ErrorWrapper(exc, loc\u001b[38;5;241m=\u001b[39mROOT_KEY)], \u001b[38;5;28mcls\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m--> 526\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mobj\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:341\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(__pydantic_self__, **data)\u001b[0m\n\u001b[1;32m 339\u001b[0m values, fields_set, validation_error \u001b[38;5;241m=\u001b[39m validate_model(__pydantic_self__\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m, data)\n\u001b[1;32m 340\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m validation_error:\n\u001b[0;32m--> 341\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m validation_error\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 343\u001b[0m object_setattr(__pydantic_self__, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__dict__\u001b[39m\u001b[38;5;124m'\u001b[39m, values)\n",
"Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/runnables/base.py:2572\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 2570\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m step\u001b[38;5;241m.\u001b[39minvoke(\u001b[38;5;28minput\u001b[39m, config, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 2571\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2572\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2573\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2574\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:380\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 374\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 376\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 377\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 378\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 379\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 380\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 381\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 382\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 383\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 384\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 385\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 386\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 387\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 388\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 389\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:537\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 535\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 536\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 537\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 538\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 539\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:526\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 524\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 525\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(_set_config_context, child_config)\n\u001b[0;32m--> 526\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 527\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 528\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 529\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(\n\u001b[1;32m 530\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m context\u001b[38;5;241m.\u001b[39mrun(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 534\u001b[0m )\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:424\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 422\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 423\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 424\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 425\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 426\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 427\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 428\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 429\u001b[0m }\n\u001b[1;32m 430\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:526\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.parse_obj\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)"
]
}
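As an aside, the traceback above also reveals a built-in escape hatch: `BaseTool.handle_validation_error`. A minimal sketch, assuming the tool's fields are mutable in your version; per the traceback, setting it to `True` makes invalid input produce the observation string "Tool input validation error" instead of raising:

# Assumption: `handle_validation_error` can be set on the existing tool instance.
complex_tool.handle_validation_error = True
chain = llm_with_tools | (lambda msg: msg.tool_calls[0]["args"]) | complex_tool
chain.invoke(
    "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
# Returns 'Tool input validation error' rather than raising ValidationError.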
@@ -176,10 +172,26 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "8fedb550-683d-45ae-8876-ae7acb332019",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
" field required (type=value_error.missing)\n"
]
}
],
"source": [
"from typing import Any\n",
"\n",
@@ -193,32 +205,8 @@
" return f\"Calling tool with arguments:\\n\\n{tool_args}\\n\\nraised the following error:\\n\\n{type(e)}: {e}\"\n",
"\n",
"\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "71a2c98d-c0be-4c0a-bb3d-41ad4596526c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.v1.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
" field required (type=value_error.missing)\n"
]
}
],
"source": [
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool\n",
"\n",
"print(\n",
" chain.invoke(\n",
" \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
@@ -238,7 +226,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 10,
"id": "02cc4223-35fa-4240-976a-012299ca703c",
"metadata": {},
"outputs": [
@@ -248,19 +236,22 @@
"10.5"
]
},
"execution_count": 17,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"better_model = ChatOpenAI(model=\"gpt-4-1106-preview\", temperature=0).bind_tools(\n",
" [complex_tool], tool_choice=\"complex_tool\"\n",
")\n",
"\n",
"better_chain = better_model | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"chain_with_fallback = chain.with_fallbacks([better_chain])\n",
"\n",
"chain_with_fallback.invoke(\n",
" \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
")"
@@ -271,7 +262,7 @@
"id": "412f8c4e-cc83-4d87-84a1-5ba2f8edb1e9",
"metadata": {},
"source": [
"Looking at the [Langsmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
"Looking at the [LangSmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
]
},
{
@@ -286,17 +277,13 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 11,
"id": "b5659956-9454-468a-9753-a3ff9052b8f5",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"from typing import Any\n",
"\n",
"from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class CustomToolException(Exception):\n",
@@ -336,7 +323,7 @@
"# affect the prompt at all, but gives us the option to insert an arbitrary list of Messages\n",
"# into the prompt if needed. We'll use this on retries to insert the error message.\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"{input}\"), MessagesPlaceholder(\"last_output\", optional=True)]\n",
" [(\"human\", \"{input}\"), (\"placeholder\", \"{last_output}\")]\n",
")\n",
"chain = prompt | llm_with_tools | tool_custom_exception\n",
"\n",
@@ -348,7 +335,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 12,
"id": "4c45f5bd-cbb4-47d5-b4b6-aec50673c750",
"metadata": {},
"outputs": [
@@ -358,7 +345,7 @@
"10.5"
]
},
"execution_count": 14,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -378,6 +365,24 @@
"source": [
"And our chain succeeds! Looking at the [LangSmith trace](https://smith.langchain.com/public/c11e804c-e14f-4059-bd09-64766f999c14/r), we can see that indeed our initial chain still fails, and it's only on retrying that the chain succeeds."
]
},
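The retry wiring itself is elided in this diff; a hedged reconstruction of what it might look like, assuming `CustomToolException` stores the failed `tool_call` and underlying `exception` (the names `exception_to_messages` and `self_correcting_chain` are illustrative, not taken from the diff):

def exception_to_messages(inputs: dict) -> dict:
    exception = inputs.pop("exception")
    # Replay the failed call plus its error so the model can self-correct,
    # then re-run the chain with these messages filling the "last_output" slot.
    inputs["last_output"] = [
        AIMessage(content="", tool_calls=[exception.tool_call]),
        ToolMessage(tool_call_id=exception.tool_call["id"], content=str(exception.exception)),
        HumanMessage(content="The last tool call raised an exception. Try again with corrected arguments."),
    ]
    return inputs


self_correcting_chain = chain.with_fallbacks(
    [exception_to_messages | chain], exception_key="exception"
)
self_correcting_chain.invoke(
    {"input": "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"}
)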
{
"cell_type": "markdown",
"id": "6b97af9f",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Now you've seen some strategies for handling tool calling errors. Next, you can learn more about how to use tools:\n",
"\n",
"- Few shot prompting [with tools](/docs/how_to/tools_few_shot/)\n",
"- Stream [tool calls](/docs/how_to/tool_streaming/)\n",
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
@@ -396,7 +401,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.10.5"
}
},
"nbformat": 4,
175
docs/docs/how_to/tools_few_shot.ipynb
Normal file
@@ -0,0 +1,175 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use few-shot prompting with tool calling\n",
"\n",
"For more complex tool use, it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n",
"\n",
"First, let's define our tools and model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def add(a: int, b: int) -> int:\n",
" \"\"\"Adds a and b.\"\"\"\n",
" return a + b\n",
"\n",
"\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiplies a and b.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"tools = [add, multiply]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm_with_tools = llm.bind_tools(tools)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's run our model. Notice that even with some special instructions, it can get tripped up by the order of operations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'Multiply',\n",
" 'args': {'a': 119, 'b': 8},\n",
" 'id': 'call_T88XN6ECucTgbXXkyDeC2CQj'},\n",
" {'name': 'Add',\n",
" 'args': {'a': 952, 'b': -20},\n",
" 'id': 'call_licdlmGsRqzup8rhqJSb1yZ4'}]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"llm_with_tools.invoke(\n",
" \"Whats 119 times 8 minus 20. Don't do any math yourself, only use tools for math. Respect order of operations\"\n",
").tool_calls"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The model shouldn't be trying to add anything yet, since it technically can't know the result of 119 * 8.\n",
"\n",
"By adding a prompt with some examples we can correct this behavior:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'Multiply',\n",
" 'args': {'a': 119, 'b': 8},\n",
" 'id': 'call_9MvuwQqg7dlJupJcoTWiEsDo'}]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage, ToolMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"examples = [\n",
" HumanMessage(\n",
" \"What's the product of 317253 and 128472 plus four\", name=\"example_user\"\n",
" ),\n",
" AIMessage(\n",
" \"\",\n",
" name=\"example_assistant\",\n",
" tool_calls=[\n",
" {\"name\": \"Multiply\", \"args\": {\"a\": 317253, \"b\": 128472}, \"id\": \"1\"}\n",
" ],\n",
" ),\n",
" ToolMessage(\"16505054784\", tool_call_id=\"1\"),\n",
" AIMessage(\n",
" \"\",\n",
" name=\"example_assistant\",\n",
" tool_calls=[{\"name\": \"Add\", \"args\": {\"a\": 16505054784, \"b\": 4}, \"id\": \"2\"}],\n",
" ),\n",
" ToolMessage(\"16505054788\", tool_call_id=\"2\"),\n",
" AIMessage(\n",
" \"The product of 317253 and 128472 plus four is 16505054788\",\n",
" name=\"example_assistant\",\n",
" ),\n",
"]\n",
"\n",
"system = \"\"\"You are bad at math but are an expert at using a calculator. \n",
"\n",
"Use past tool usage as an example of how to correctly use the tools.\"\"\"\n",
"few_shot_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", system),\n",
" *examples,\n",
" (\"human\", \"{query}\"),\n",
" ]\n",
")\n",
"\n",
"chain = {\"query\": RunnablePassthrough()} | few_shot_prompt | llm_with_tools\n",
"chain.invoke(\"Whats 119 times 8 minus 20\").tool_calls"
]
},
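As a quick follow-up sketch, the returned tool calls can be executed directly, since `BaseTool.invoke` accepts a dict of arguments (visible in its signature in the traceback earlier in this diff). The `.lower()` lookup is defensive: the recorded outputs above show capitalized tool names from an earlier run, while the tools defined here are named `add` and `multiply`.

ai_msg = chain.invoke("Whats 119 times 8 minus 20")
for tool_call in ai_msg.tool_calls:
    selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
    # e.g. multiply.invoke({'a': 119, 'b': 8}) -> 952
    print(selected_tool.invoke(tool_call["args"]))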
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And we get the correct output this time.\n",
"\n",
"Here's what the [LangSmith trace](https://smith.langchain.com/public/f70550a1-585f-4c9d-a643-13148ab1616f/r) looks like."
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}