mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-05 08:40:36 +00:00
Compare commits
690 Commits
(Commit listing omitted: 690 entries; only bare SHA1 hashes survive in this mirror, and the author, date, and message columns are empty.)
@@ -10,7 +10,7 @@ You can use the dev container configuration in this folder to build and run the
You may use the button above, or follow these steps to open this repo in a Codespace:
1. Click the **Code** drop-down menu at the top of https://github.com/langchain-ai/langchain.
1. Click on the **Codespaces** tab.
1. Click **Create codespace on master** .
1. Click **Create codespace on master**.

For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
9 .github/ISSUE_TEMPLATE/documentation.yml (vendored)
@@ -26,6 +26,13 @@ body:
[LangChain Github Discussions](https://github.com/langchain-ai/langchain/discussions),
[LangChain Github Issues](https://github.com/langchain-ai/langchain/issues?q=is%3Aissue),
[LangChain ChatBot](https://chat.langchain.com/)
- type: input
id: url
attributes:
label: URL
description: URL to documentation
validations:
required: false
- type: checkboxes
id: checks
attributes:
@@ -48,4 +55,4 @@ body:
label: "Idea or request for content:"
description: >
Please describe as clearly as possible what topics you think are missing
from the current documentation.
from the current documentation.
2 .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -26,4 +26,4 @@ Additional guidelines:
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, ccurme, vbarda, hwchase17.
4 .github/actions/people/app/main.py (vendored)
@@ -537,7 +537,9 @@ if __name__ == "__main__":
"nfcampos",
"efriis",
"eyurtsev",
"rlancemartin"
"rlancemartin",
"ccurme",
"vbarda",
}
hidden_logins = {
"dev2049",
55 .github/scripts/check_diff.py (vendored)
@@ -1,7 +1,11 @@
import json
import sys
import os
from typing import Dict
from typing import Dict, List, Set

import tomllib
from collections import defaultdict
import glob

LANGCHAIN_DIRS = [
"libs/core",
@@ -11,6 +15,38 @@ LANGCHAIN_DIRS = [
"libs/experimental",
]

def all_package_dirs() -> Set[str]:
return {"/".join(path.split("/")[:-1]) for path in glob.glob("./libs/**/pyproject.toml", recursive=True)}


def dependents_graph() -> dict:
dependents = defaultdict(set)

for path in glob.glob("./libs/**/pyproject.toml", recursive=True):
if "template" in path:
continue
with open(path, "rb") as f:
pyproject = tomllib.load(f)['tool']['poetry']
pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
for dep in pyproject['dependencies']:
if "langchain" in dep:
dependents[dep].add(pkg_dir)
return dependents


def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
updated = set()
for dir_ in dirs_to_eval:
# handle core manually because it has so many dependents
if "core" in dir_:
updated.add(dir_)
continue
pkg = "langchain-" + dir_.split("/")[-1]
updated.update(dependents[pkg])
updated.add(dir_)
return list(updated)


if __name__ == "__main__":
files = sys.argv[1:]

@@ -21,10 +57,11 @@ if __name__ == "__main__":
}
docs_edited = False

if len(files) == 300:
if len(files) >= 300:
# max diff length is 300 files - there are likely files missing
raise ValueError("Max diff reached. Please manually run CI on changed libs.")

dirs_to_run["lint"] = all_package_dirs()
dirs_to_run["test"] = all_package_dirs()
dirs_to_run["extended-test"] = set(LANGCHAIN_DIRS)
for file in files:
if any(
file.startswith(dir_)
@@ -81,14 +118,16 @@ if __name__ == "__main__":
docs_edited = True
dirs_to_run["lint"].add(".")

dependents = dependents_graph()

outputs = {
"dirs-to-lint": list(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"]
"dirs-to-lint": add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
),
"dirs-to-test": list(dirs_to_run["test"] | dirs_to_run["extended-test"]),
"dirs-to-test": add_dependents(dirs_to_run["test"] | dirs_to_run["extended-test"], dependents),
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
"docs-edited": "true" if docs_edited else "",
}
for key, value in outputs.items():
json_output = json.dumps(value)
print(f"{key}={json_output}") # noqa: T201
print(f"{key}={json_output}")
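The dependents-graph logic in this diff is the core of the new CI routing: when a partner package changes, every package that depends on it is scheduled as well. A minimal standalone sketch of the same idea (hypothetical package names and directories, not the actual repository layout; sorted output added only for determinism):

```python
from collections import defaultdict
from typing import Dict, List, Set

# Hypothetical dependents graph: packages that depend on "langchain-openai".
dependents: Dict[str, Set[str]] = defaultdict(set)
dependents["langchain-openai"].update({"libs/langchain", "libs/community"})


def add_dependents(dirs_to_eval: Set[str], dependents: Dict[str, Set[str]]) -> List[str]:
    # Loosely mirrors the behaviour added in check_diff.py: keep every edited
    # directory, add its dependents, and special-case core (too many dependents).
    updated = set()
    for dir_ in dirs_to_eval:
        if "core" in dir_:
            updated.add(dir_)
            continue
        pkg = "langchain-" + dir_.split("/")[-1]
        updated.update(dependents[pkg])
        updated.add(dir_)
    return sorted(updated)


# Editing libs/partners/openai also schedules libs/langchain and libs/community.
print(add_dependents({"libs/partners/openai"}, dependents))
# ['libs/community', 'libs/langchain', 'libs/partners/openai']
```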
2 .github/scripts/get_min_versions.py (vendored)
@@ -76,4 +76,4 @@ if __name__ == "__main__":
print(
" ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
) # noqa: T201
)
7 .github/workflows/.codespell-exclude (vendored, new file)
@@ -0,0 +1,7 @@
libs/community/langchain_community/llms/yuan2.py
"NotIn": "not in",
- `/checkin`: Check-in
docs/docs/integrations/providers/trulens.mdx
self.assertIn(
from trulens_eval import Tru
tru = Tru()

@@ -24,6 +24,7 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
1 .github/workflows/_dependencies.yml (vendored)
@@ -28,6 +28,7 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: dependency checks ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
8 .github/workflows/_integration_test.yml (vendored)
@@ -12,7 +12,6 @@ env:

jobs:
build:
environment: Scheduled testing
defaults:
run:
working-directory: ${{ inputs.working-directory }}
@@ -53,8 +52,15 @@ jobs:
shell: bash
env:
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
2 .github/workflows/_lint.yml (vendored)
@@ -34,7 +34,7 @@ jobs:
# so linting on fewer versions makes CI faster.
python-version:
- "3.8"
- "3.11"
- "3.12"
steps:
- uses: actions/checkout@v4
63 .github/workflows/_release.yml (vendored)
@@ -72,10 +72,67 @@ jobs:
run: |
echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
echo version="$(poetry version --short)" >> $GITHUB_OUTPUT
release-notes:
needs:
- build
runs-on: ubuntu-latest
outputs:
release-body: ${{ steps.generate-release-body.outputs.release-body }}
steps:
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain
path: langchain
sparse-checkout: | # this only grabs files for relevant dir
${{ inputs.working-directory }}
ref: master # this scopes to just master branch
fetch-depth: 0 # this fetches entire commit history
- name: Check Tags
id: check-tags
shell: bash
working-directory: langchain/${{ inputs.working-directory }}
env:
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
VERSION: ${{ needs.build.outputs.version }}
run: |
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
echo $REGEX
PREV_TAG=$(git tag --sort=-creatordate | grep -P $REGEX || true | head -1)
TAG="${PKG_NAME}==${VERSION}"
if [ "$TAG" == "$PREV_TAG" ]; then
echo "No new version to release"
exit 1
fi
echo tag="$TAG" >> $GITHUB_OUTPUT
echo prev-tag="$PREV_TAG" >> $GITHUB_OUTPUT
- name: Generate release body
id: generate-release-body
working-directory: langchain
env:
WORKING_DIR: ${{ inputs.working-directory }}
PKG_NAME: ${{ needs.build.outputs.pkg-name }}
TAG: ${{ steps.check-tags.outputs.tag }}
PREV_TAG: ${{ steps.check-tags.outputs.prev-tag }}
run: |
PREAMBLE="Changes since $PREV_TAG"
# if PREV_TAG is empty, then we are releasing the first version
if [ -z "$PREV_TAG" ]; then
PREAMBLE="Initial release"
PREV_TAG=$(git rev-list --max-parents=0 HEAD)
fi
{
echo 'release-body<<EOF'
echo "# Release $TAG"
echo $PREAMBLE
echo
git log --format="%s" "$PREV_TAG"..HEAD -- $WORKING_DIR
echo EOF
} >> "$GITHUB_OUTPUT"

test-pypi-publish:
needs:
- build
- release-notes
uses:
./.github/workflows/_test_release.yml
with:
@@ -86,6 +143,7 @@ jobs:
pre-release-checks:
needs:
- build
- release-notes
- test-pypi-publish
runs-on: ubuntu-latest
steps:
@@ -229,6 +287,7 @@ jobs:
publish:
needs:
- build
- release-notes
- test-pypi-publish
- pre-release-checks
runs-on: ubuntu-latest
@@ -270,6 +329,7 @@ jobs:
mark-release:
needs:
- build
- release-notes
- test-pypi-publish
- pre-release-checks
- publish
@@ -306,5 +366,6 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
generateReleaseNotes: false
tag: ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}
body: "# Release ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}\n\nPackage-specific release note generation coming soon."
body: ${{ needs.release-notes.outputs.release-body }}
commit: ${{ github.sha }}
makeLatest: ${{ needs.build.outputs.pkg-name == 'langchain-core'}}
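The `Check Tags` step above finds the previous release tag for a package by matching a `pkg==major.minor.patch` pattern against the tag list. A small Python sketch of the same matching logic, with hypothetical tag names (the workflow itself does this with `git tag --sort=-creatordate` and `grep -P`):

```python
import re

PKG_NAME = "langchain-core"  # hypothetical package name for illustration
# Same shape of pattern the workflow builds: <pkg>==<major>.<minor>.<patch>
pattern = re.compile(rf"^{re.escape(PKG_NAME)}==\d+\.\d+\.\d+$")

# Hypothetical tags, newest first (as produced by the creator-date sort).
tags = ["langchain-core==0.2.5", "langchain-openai==0.1.8", "langchain-core==0.2.4"]

# First match is the most recent prior release of this package.
prev_tag = next((t for t in tags if pattern.match(t)), "")
print(prev_tag)  # -> "langchain-core==0.2.5"
```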
1 .github/workflows/_test.yml (vendored)
@@ -28,6 +28,7 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "make test #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
2 .github/workflows/_test_doc_imports.yml (vendored)
@@ -12,7 +12,7 @@ jobs:
strategy:
matrix:
python-version:
- "3.11"
- "3.12"
name: "check doc imports #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
1 .github/workflows/check-broken-links.yml (vendored)
@@ -7,6 +7,7 @@ on:

jobs:
check-links:
if: github.repository_owner == 'langchain-ai'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
7 .github/workflows/check_diffs.yml (vendored)
@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.10'
python-version: '3.11'
- id: files
uses: Ana06/get-changed-files@v2.2.0
- id: set-matrix
@@ -104,6 +104,7 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
- "3.12"
runs-on: ubuntu-latest
defaults:
run:
@@ -123,7 +124,9 @@ jobs:
shell: bash
run: |
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing --with test
poetry install --with test
poetry run pip install uv
poetry run uv pip install -r extended_testing_deps.txt

- name: Run extended tests
run: make extended_tests
31 .github/workflows/check_new_docs.yml (vendored, new file)
@@ -0,0 +1,31 @@
---
name: Integration docs lint

on:
push:
branches: [master]
pull_request:

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.10'
- id: files
uses: Ana06/get-changed-files@v2.2.0
- name: Check new docs
run: |
python docs/scripts/check_templates.py ${{ steps.files.outputs.added }}
12 .github/workflows/codespell.yml (vendored)
@@ -29,9 +29,9 @@ jobs:
python .github/workflows/extract_ignored_words_list.py
id: extract_ignore_words

- name: Codespell
uses: codespell-project/actions-codespell@v2
with:
skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock
ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
exclude_file: libs/community/langchain_community/llms/yuan2.py
# - name: Codespell
#   uses: codespell-project/actions-codespell@v2
#   with:
#     skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock
#     ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
#     exclude_file: ./.github/workflows/codespell-exclude

@@ -7,4 +7,4 @@ ignore_words_list = (
pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
)

print(f"::set-output name=ignore_words_list::{ignore_words_list}") # noqa: T201
print(f"::set-output name=ignore_words_list::{ignore_words_list}")
68 .github/workflows/scheduled_test.yml (vendored)
@@ -10,6 +10,8 @@ env:

jobs:
build:
if: github.repository_owner == 'langchain-ai'
name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
@@ -25,16 +27,45 @@ jobs:
- "libs/partners/groq"
- "libs/partners/mistralai"
- "libs/partners/together"
name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
- "libs/partners/cohere"
- "libs/partners/google-vertexai"
- "libs/partners/google-genai"
- "libs/partners/aws"

steps:
- uses: actions/checkout@v4
with:
path: langchain
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain-google
path: langchain-google
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain-cohere
path: langchain-cohere
- uses: actions/checkout@v4
with:
repository: langchain-ai/langchain-aws
path: langchain-aws

- name: Move libs
run: |
rm -rf \
langchain/libs/partners/google-genai \
langchain/libs/partners/google-vertexai \
langchain/libs/partners/cohere
mv langchain-google/libs/genai langchain/libs/partners/google-genai
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
mv langchain-cohere/libs/cohere langchain/libs/partners/cohere
mv langchain-aws/libs/aws langchain/libs/partners/aws

- name: Set up Python ${{ matrix.python-version }}
uses: "./.github/actions/poetry_setup"
uses: "./langchain/.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ matrix.working-directory }}
working-directory: langchain/${{ matrix.working-directory }}
cache-key: scheduled

- name: 'Authenticate to Google Cloud'
@@ -43,16 +74,20 @@ jobs:
with:
credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'

- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_REGION }}

- name: Install dependencies
working-directory: ${{ matrix.working-directory }}
shell: bash
run: |
echo "Running scheduled tests, installing dependencies with poetry..."
cd langchain/${{ matrix.working-directory }}
poetry install --with=test_integration,test

- name: Run integration tests
working-directory: ${{ matrix.working-directory }}
shell: bash
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
@@ -67,12 +102,25 @@ jobs:
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
run: |
make integration_test
cd langchain/${{ matrix.working-directory }}
make integration_tests

- name: Remove external libraries
run: |
rm -rf \
langchain/libs/partners/google-genai \
langchain/libs/partners/google-vertexai \
langchain/libs/partners/cohere \
langchain/libs/partners/aws

- name: Ensure the tests did not create any additional files
working-directory: ${{ matrix.working-directory }}
shell: bash
working-directory: langchain
run: |
set -eu
2 .gitignore (vendored)
@@ -133,6 +133,7 @@ env.bak/

# mypy
.mypy_cache/
.mypy_cache_test/
.dmypy.json
dmypy.json

@@ -178,3 +179,4 @@ _dist
docs/docs/templates

prof
virtualenv/
11 Makefile
@@ -32,10 +32,19 @@ api_docs_build:
poetry run python docs/api_reference/create_api_rst.py
cd docs/api_reference && poetry run make html

API_PKG ?= text-splitters

api_docs_quick_preview:
poetry run pip install "pydantic<2"
poetry run python docs/api_reference/create_api_rst.py $(API_PKG)
cd docs/api_reference && poetry run make html
open docs/api_reference/_build/html/$(shell echo $(API_PKG) | sed 's/-/_/g')_api_reference.html

## api_docs_clean: Clean the API Reference documentation build artifacts.
api_docs_clean:
find ./docs/api_reference -name '*_api_reference.rst' -delete
cd docs/api_reference && poetry run make clean
git clean -fdX ./docs/api_reference

## api_docs_linkcheck: Run linkchecker on the API Reference documentation.
api_docs_linkcheck:
75 README.md
@@ -2,17 +2,17 @@

⚡ Build context-aware reasoning applications ⚡

[](https://github.com/langchain-ai/langchain/releases)
[](https://github.com/langchain-ai/langchain/releases)
[](https://github.com/langchain-ai/langchain/actions/workflows/check_diffs.yml)
[](https://pepy.tech/project/langchain-core)
[](https://opensource.org/licenses/MIT)
[](https://twitter.com/langchainai)
[](https://discord.gg/6adMQxSpJS)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
[](https://opensource.org/licenses/MIT)
[](https://pypistats.org/packages/langchain-core)
[](https://star-history.com/#langchain-ai/langchain)
[](https://libraries.io/github/langchain-ai/langchain)
[](https://github.com/langchain-ai/langchain/issues)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
[](https://codespaces.new/langchain-ai/langchain)
[](https://star-history.com/#langchain-ai/langchain)
[](https://libraries.io/github/langchain-ai/langchain)
[](https://github.com/langchain-ai/langchain/issues)
[](https://discord.gg/6adMQxSpJS)
[](https://twitter.com/langchainai)

Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

@@ -38,22 +38,22 @@ conda install langchain -c conda-forge

For these applications, LangChain simplifies the entire application lifecycle:

- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/docs/expression_language/) and [components](https://python.langchain.com/docs/modules/). Integrate with hundreds of [third-party providers](https://python.langchain.com/docs/integrations/platforms/).
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://python.langchain.com/docs/langsmith/) so that you can constantly optimize and deploy with confidence.
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/docs/langserve).
- **Open-source libraries**: Build your applications using LangChain's [modular building blocks](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel) and [components](https://python.langchain.com/v0.2/docs/concepts/#components). Integrate with hundreds of [third-party providers](https://python.langchain.com/v0.2/docs/integrations/platforms/).
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
- **Deployment**: Turn any chain into a REST API with [LangServe](https://python.langchain.com/v0.2/docs/langserve/).

### Open-source libraries
- **`langchain-core`**: Base abstractions and LangChain Expression Language.
- **`langchain-community`**: Third party integrations.
- Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- **[`LangGraph`](https://python.langchain.com/docs/langgraph)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
- **[`LangGraph`](https://langchain-ai.github.io/langgraph/)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.

### Productionization:
- **[LangSmith](https://python.langchain.com/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
- **[LangSmith](https://docs.smith.langchain.com/)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.

### Deployment:
- **[LangServe](https://python.langchain.com/docs/langserve)**: A library for deploying LangChain chains as REST APIs.
- **[LangServe](https://python.langchain.com/v0.2/docs/langserve/)**: A library for deploying LangChain chains as REST APIs.



@@ -61,20 +61,20 @@ For these applications, LangChain simplifies the entire application lifecycle:

**❓ Question answering with RAG**

- [Documentation](https://python.langchain.com/docs/use_cases/question_answering/)
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/rag/)
- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)

**🧱 Extracting structured output**

- [Documentation](https://python.langchain.com/docs/use_cases/extraction/)
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/extraction/)
- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain-extract/)

**🤖 Chatbots**

- [Documentation](https://python.langchain.com/docs/use_cases/chatbots)
- [Documentation](https://python.langchain.com/v0.2/docs/tutorials/chatbot/)
- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain)

And much more! Head to the [Use cases](https://python.langchain.com/docs/use_cases/) section of the docs for more.
And much more! Head to the [Tutorials](https://python.langchain.com/v0.2/docs/tutorials/) section of the docs for more.

## 🚀 How does LangChain help?
The main value props of the LangChain libraries are:
@@ -87,49 +87,50 @@ Off-the-shelf chains make it easy to get started. Components make it easy to cus

LCEL is the foundation of many of LangChain's components, and is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.

- **[Overview](https://python.langchain.com/docs/expression_language/)**: LCEL and its benefits
- **[Interface](https://python.langchain.com/docs/expression_language/interface)**: The standard interface for LCEL objects
- **[Primitives](https://python.langchain.com/docs/expression_language/primitives)**: More on the primitives LCEL includes
- **[Overview](https://python.langchain.com/v0.2/docs/concepts/#langchain-expression-language-lcel)**: LCEL and its benefits
- **[Interface](https://python.langchain.com/v0.2/docs/concepts/#runnable-interface)**: The standard Runnable interface for LCEL objects
- **[Primitives](https://python.langchain.com/v0.2/docs/how_to/#langchain-expression-language-lcel)**: More on the primitives LCEL includes
- **[Cheatsheet](https://python.langchain.com/v0.2/docs/how_to/lcel_cheatsheet/)**: Quick overview of the most common usage patterns

## Components

Components fall into the following **modules**:

**📃 Model I/O:**
**📃 Model I/O**

This includes [prompt management](https://python.langchain.com/docs/modules/model_io/prompts/), [prompt optimization](https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/), a generic interface for [chat models](https://python.langchain.com/docs/modules/model_io/chat/) and [LLMs](https://python.langchain.com/docs/modules/model_io/llms/), and common utilities for working with [model outputs](https://python.langchain.com/docs/modules/model_io/output_parsers/).
This includes [prompt management](https://python.langchain.com/v0.2/docs/concepts/#prompt-templates), [prompt optimization](https://python.langchain.com/v0.2/docs/concepts/#example-selectors), a generic interface for [chat models](https://python.langchain.com/v0.2/docs/concepts/#chat-models) and [LLMs](https://python.langchain.com/v0.2/docs/concepts/#llms), and common utilities for working with [model outputs](https://python.langchain.com/v0.2/docs/concepts/#output-parsers).

**📚 Retrieval:**
**📚 Retrieval**

Retrieval Augmented Generation involves [loading data](https://python.langchain.com/docs/modules/data_connection/document_loaders/) from a variety of sources, [preparing it](https://python.langchain.com/docs/modules/data_connection/document_loaders/), [then retrieving it](https://python.langchain.com/docs/modules/data_connection/retrievers/) for use in the generation step.
Retrieval Augmented Generation involves [loading data](https://python.langchain.com/v0.2/docs/concepts/#document-loaders) from a variety of sources, [preparing it](https://python.langchain.com/v0.2/docs/concepts/#text-splitters), then [searching over (a.k.a. retrieving from)](https://python.langchain.com/v0.2/docs/concepts/#retrievers) it for use in the generation step.

**🤖 Agents:**
**🤖 Agents**

Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete done. LangChain provides a [standard interface for agents](https://python.langchain.com/docs/modules/agents/), a [selection of agents](https://python.langchain.com/docs/modules/agents/agent_types/) to choose from, and examples of end-to-end agents.
Agents allow an LLM autonomy over how a task is accomplished. Agents make decisions about which Actions to take, then take that Action, observe the result, and repeat until the task is complete. LangChain provides a [standard interface for agents](https://python.langchain.com/v0.2/docs/concepts/#agents) along with the [LangGraph](https://github.com/langchain-ai/langgraph) extension for building custom agents.

## 📖 Documentation

Please see [here](https://python.langchain.com) for full documentation, which includes:

- [Getting started](https://python.langchain.com/docs/get_started/introduction): installation, setting up the environment, simple examples
- [Use case](https://python.langchain.com/docs/use_cases/) walkthroughs and best practice [guides](https://python.langchain.com/docs/guides/)
- Overviews of the [interfaces](https://python.langchain.com/docs/expression_language/), [components](https://python.langchain.com/docs/modules/), and [integrations](https://python.langchain.com/docs/integrations/providers)

You can also check out the full [API Reference docs](https://api.python.langchain.com).
- [Introduction](https://python.langchain.com/v0.2/docs/introduction/): Overview of the framework and the structure of the docs.
- [Tutorials](https://python.langchain.com/docs/use_cases/): If you're looking to build something specific or are more of a hands-on learner, check out our tutorials. This is the best place to get started.
- [How-to guides](https://python.langchain.com/v0.2/docs/how_to/): Answers to “How do I….?” type questions. These guides are goal-oriented and concrete; they're meant to help you complete a specific task.
- [Conceptual guide](https://python.langchain.com/v0.2/docs/concepts/): Conceptual explanations of the key parts of the framework.
- [API Reference](https://api.python.langchain.com): Thorough documentation of every class and method.

## 🌐 Ecosystem

- [🦜🛠️ LangSmith](https://python.langchain.com/docs/langsmith/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
- [🦜🕸️ LangGraph](https://python.langchain.com/docs/langgraph): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
- [🦜🛠️ LangSmith](https://docs.smith.langchain.com/): Tracing and evaluating your language model applications and intelligent agents to help you move from prototype to production.
- [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/): Creating stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
- [🦜🏓 LangServe](https://python.langchain.com/docs/langserve): Deploying LangChain runnables and chains as REST APIs.
- [LangChain Templates](https://python.langchain.com/docs/templates/): Example applications hosted with LangServe.
- [LangChain Templates](https://python.langchain.com/v0.2/docs/templates/): Example applications hosted with LangServe.


## 💁 Contributing

As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.

For detailed information on how to contribute, see [here](https://python.langchain.com/docs/contributing/).
For detailed information on how to contribute, see [here](https://python.langchain.com/v0.2/docs/contributing/).

## 🌟 Contributors
@@ -46,7 +46,7 @@
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Needed synce jupyter runs an async eventloop\n",
"# Needed since jupyter runs an async eventloop\n",
"nest_asyncio.apply()"
]
},

@@ -273,7 +273,7 @@
"source": [
"# Tool schema for querying SQL db\n",
"class create_df_from_sql(BaseModel):\n",
" \"\"\"Execute a PostgreSQL SELECT statement and use the results to create a DataFrame with the given colum names.\"\"\"\n",
" \"\"\"Execute a PostgreSQL SELECT statement and use the results to create a DataFrame with the given column names.\"\"\"\n",
"\n",
" select_query: str = Field(..., description=\"A PostgreSQL SELECT statement.\")\n",
" # We're going to convert the results to a Pandas DataFrame that we pass\n",
497 cookbook/nomic_multimodal_rag.ipynb (new file)
@@ -0,0 +1,497 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "9fc3897d-176f-4729-8fd1-cfb4add53abd",
"metadata": {},
"source": [
"## Nomic multi-modal RAG\n",
"\n",
"Many documents contain a mixture of content types, including text and images. \n",
"\n",
"Yet, information captured in images is lost in most RAG applications.\n",
"\n",
"With the emergence of multimodal LLMs, like [GPT-4V](https://openai.com/research/gpt-4v-system-card), it is worth considering how to utilize images in RAG:\n",
"\n",
"In this demo we\n",
"\n",
"* Use multimodal embeddings from Nomic Embed [Vision](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) and [Text](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) to embed images and text\n",
"* Retrieve both using similarity search\n",
"* Pass raw images and text chunks to a multimodal LLM for answer synthesis \n",
"\n",
"## Signup\n",
"\n",
"Get your API token, then run:\n",
"```\n",
"! nomic login\n",
"```\n",
"\n",
"Then run with your generated API token \n",
"```\n",
"! nomic login < token > \n",
"```\n",
"\n",
"## Packages\n",
"\n",
"For `unstructured`, you will also need `poppler` ([installation instructions](https://pdf2image.readthedocs.io/en/latest/installation.html)) and `tesseract` ([installation instructions](https://tesseract-ocr.github.io/tessdoc/Installation.html)) in your system."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "54926b9b-75c2-4cd4-8f14-b3882a0d370b",
"metadata": {},
"outputs": [],
"source": [
"! nomic login token"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "febbc459-ebba-4c1a-a52b-fed7731593f8",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "acbdc603-39e2-4a5f-836c-2bbaecd46b0b",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# lock to 0.10.19 due to a persistent bug in more recent versions\n",
"! pip install \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml pillow matplotlib tiktoken"
]
},
{
"cell_type": "markdown",
"id": "1e94b3fb-8e3e-4736-be0a-ad881626c7bd",
"metadata": {},
"source": [
"## Data Loading\n",
"\n",
"### Partition PDF text and images\n",
" \n",
"Let's look at an example pdfs containing interesting images.\n",
"\n",
"1/ Art from the J Paul Getty museum:\n",
"\n",
" * Here is a [zip file](https://drive.google.com/file/d/18kRKbq2dqAhhJ3DfZRnYcTBEUfYxe1YR/view?usp=sharing) with the PDF and the already extracted images. \n",
"* https://www.getty.edu/publications/resources/virtuallibrary/0892360224.pdf\n",
"\n",
"2/ Famous photographs from library of congress:\n",
"\n",
"* https://www.loc.gov/lcm/pdf/LCM_2020_1112.pdf\n",
"* We'll use this as an example below\n",
"\n",
"We can use `partition_pdf` below from [Unstructured](https://unstructured-io.github.io/unstructured/introduction.html#key-concepts) to extract text and images.\n",
"\n",
"To supply this to extract the images:\n",
"```\n",
"extract_images_in_pdf=True\n",
"```\n",
"\n",
"\n",
"\n",
"If using this zip file, then you can simply process the text only with:\n",
"```\n",
"extract_images_in_pdf=False\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9646b524-71a7-4b2a-bdc8-0b81f77e968f",
"metadata": {},
"outputs": [],
"source": [
"# Folder with pdf and extracted images\n",
"from pathlib import Path\n",
"\n",
"# replace with actual path to images\n",
"path = Path(\"../art\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77f096ab-a933-41d0-8f4e-1efc83998fc3",
"metadata": {},
"outputs": [],
"source": [
"path.resolve()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bc4839c0-8773-4a07-ba59-5364501269b2",
"metadata": {},
"outputs": [],
"source": [
"# Extract images, tables, and chunk text\n",
"from unstructured.partition.pdf import partition_pdf\n",
"\n",
"raw_pdf_elements = partition_pdf(\n",
"    filename=str(path.resolve()) + \"/getty.pdf\",\n",
"    extract_images_in_pdf=False,\n",
"    infer_table_structure=True,\n",
"    chunking_strategy=\"by_title\",\n",
"    max_characters=4000,\n",
"    new_after_n_chars=3800,\n",
"    combine_text_under_n_chars=2000,\n",
"    image_output_dir_path=path,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "969545ad",
"metadata": {},
"outputs": [],
"source": [
"# Categorize text elements by type\n",
"tables = []\n",
"texts = []\n",
"for element in raw_pdf_elements:\n",
"    if \"unstructured.documents.elements.Table\" in str(type(element)):\n",
"        tables.append(str(element))\n",
"    elif \"unstructured.documents.elements.CompositeElement\" in str(type(element)):\n",
"        texts.append(str(element))"
]
},
{
"cell_type": "markdown",
"id": "5d8e6349-1547-4cbf-9c6f-491d8610ec10",
"metadata": {},
"source": [
"## Multi-modal embeddings with our document\n",
"\n",
"We will use [nomic-embed-vision-v1.5](https://huggingface.co/nomic-ai/nomic-embed-vision-v1.5) embeddings. This model is aligned \n",
"to [nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) allowing for multimodal semantic search and Multimodal RAG!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4bc15842-cb95-4f84-9eb5-656b0282a800",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import uuid\n",
"\n",
"import chromadb\n",
"import numpy as np\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_nomic import NomicEmbeddings\n",
"from PIL import Image as _PILImage\n",
"\n",
"# Create chroma\n",
"text_vectorstore = Chroma(\n",
"    collection_name=\"mm_rag_clip_photos_text\",\n",
"    embedding_function=NomicEmbeddings(\n",
"        vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
"    ),\n",
")\n",
"image_vectorstore = Chroma(\n",
"    collection_name=\"mm_rag_clip_photos_image\",\n",
"    embedding_function=NomicEmbeddings(\n",
"        vision_model=\"nomic-embed-vision-v1.5\", model=\"nomic-embed-text-v1.5\"\n",
"    ),\n",
")\n",
"\n",
"# Get image URIs with .jpg extension only\n",
"image_uris = sorted(\n",
"    [\n",
"        os.path.join(path, image_name)\n",
"        for image_name in os.listdir(path)\n",
"        if image_name.endswith(\".jpg\")\n",
"    ]\n",
")\n",
"\n",
"# Add images\n",
"image_vectorstore.add_images(uris=image_uris)\n",
"\n",
"# Add documents\n",
"text_vectorstore.add_texts(texts=texts)\n",
"\n",
"# Make retriever\n",
"image_retriever = image_vectorstore.as_retriever()\n",
"text_retriever = text_vectorstore.as_retriever()"
]
},
{
"cell_type": "markdown",
"id": "02a186d0-27e0-4820-8092-63b5349dd25d",
"metadata": {},
"source": [
"## RAG\n",
"\n",
"`vectorstore.add_images` will store / retrieve images as base64 encoded strings.\n",
"\n",
"These can be passed to [GPT-4V](https://platform.openai.com/docs/guides/vision)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "344f56a8-0dc3-433e-851c-3f7600c7a72b",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"import io\n",
"from io import BytesIO\n",
"\n",
"import numpy as np\n",
"from PIL import Image\n",
"\n",
"\n",
"def resize_base64_image(base64_string, size=(128, 128)):\n",
"    \"\"\"\n",
"    Resize an image encoded as a Base64 string.\n",
"\n",
"    Args:\n",
"    base64_string (str): Base64 string of the original image.\n",
"    size (tuple): Desired size of the image as (width, height).\n",
"\n",
"    Returns:\n",
"    str: Base64 string of the resized image.\n",
"    \"\"\"\n",
"    # Decode the Base64 string\n",
"    img_data = base64.b64decode(base64_string)\n",
"    img = Image.open(io.BytesIO(img_data))\n",
"\n",
"    # Resize the image\n",
"    resized_img = img.resize(size, Image.LANCZOS)\n",
"\n",
"    # Save the resized image to a bytes buffer\n",
"    buffered = io.BytesIO()\n",
"    resized_img.save(buffered, format=img.format)\n",
"\n",
"    # Encode the resized image to Base64\n",
"    return base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n",
"\n",
"\n",
"def is_base64(s):\n",
"    \"\"\"Check if a string is Base64 encoded\"\"\"\n",
"    try:\n",
"        return base64.b64encode(base64.b64decode(s)) == s.encode()\n",
"    except Exception:\n",
"        return False\n",
"\n",
"\n",
"def split_image_text_types(docs):\n",
"    \"\"\"Split numpy array images and texts\"\"\"\n",
|
||||
" images = []\n",
|
||||
" text = []\n",
|
||||
" for doc in docs:\n",
|
||||
" doc = doc.page_content # Extract Document contents\n",
|
||||
" if is_base64(doc):\n",
|
||||
" # Resize image to avoid OAI server error\n",
|
||||
" images.append(\n",
|
||||
" resize_base64_image(doc, size=(250, 250))\n",
|
||||
" ) # base64 encoded str\n",
|
||||
" else:\n",
|
||||
" text.append(doc)\n",
|
||||
" return {\"images\": images, \"texts\": text}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23a2c1d8-fea6-4152-b184-3172dd46c735",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Currently, we format the inputs using a `RunnableLambda` while we add image support to `ChatPromptTemplates`.\n",
|
||||
"\n",
|
||||
"Our runnable follows the classic RAG flow - \n",
|
||||
"\n",
|
||||
"* We first compute the context (both \"texts\" and \"images\" in this case) and the question (just a RunnablePassthrough here) \n",
|
||||
"* Then we pass this into our prompt template, which is a custom function that formats the message for the gpt-4-vision-preview model. \n",
|
||||
"* And finally we parse the output as a string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5d8919dc-c238-4746-86ba-45d940a7d260",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c93fab3-74c4-4f1d-958a-0bc4cdd0797e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def prompt_func(data_dict):\n",
|
||||
" # Joining the context texts into a single string\n",
|
||||
" formatted_texts = \"\\n\".join(data_dict[\"text_context\"][\"texts\"])\n",
|
||||
" messages = []\n",
|
||||
"\n",
|
||||
" # Adding image(s) to the messages if present\n",
|
||||
" if data_dict[\"image_context\"][\"images\"]:\n",
|
||||
" image_message = {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\n",
|
||||
" \"url\": f\"data:image/jpeg;base64,{data_dict['image_context']['images'][0]}\"\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" messages.append(image_message)\n",
|
||||
"\n",
|
||||
" # Adding the text message for analysis\n",
|
||||
" text_message = {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": (\n",
|
||||
" \"As an expert art critic and historian, your task is to analyze and interpret images, \"\n",
|
||||
" \"considering their historical and cultural significance. Alongside the images, you will be \"\n",
|
||||
" \"provided with related text to offer context. Both will be retrieved from a vectorstore based \"\n",
|
||||
" \"on user-input keywords. Please use your extensive knowledge and analytical skills to provide a \"\n",
|
||||
" \"comprehensive summary that includes:\\n\"\n",
|
||||
" \"- A detailed description of the visual elements in the image.\\n\"\n",
|
||||
" \"- The historical and cultural context of the image.\\n\"\n",
|
||||
" \"- An interpretation of the image's symbolism and meaning.\\n\"\n",
|
||||
" \"- Connections between the image and the related text.\\n\\n\"\n",
|
||||
" f\"User-provided keywords: {data_dict['question']}\\n\\n\"\n",
|
||||
" \"Text and / or tables:\\n\"\n",
|
||||
" f\"{formatted_texts}\"\n",
|
||||
" ),\n",
|
||||
" }\n",
|
||||
" messages.append(text_message)\n",
|
||||
"\n",
|
||||
" return [HumanMessage(content=messages)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(temperature=0, model=\"gpt-4-vision-preview\", max_tokens=1024)\n",
|
||||
"\n",
|
||||
"# RAG pipeline\n",
|
||||
"chain = (\n",
|
||||
" {\n",
|
||||
" \"text_context\": text_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"image_context\": image_retriever | RunnableLambda(split_image_text_types),\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | RunnableLambda(prompt_func)\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1566096d-97c2-4ddc-ba4a-6ef88c525e4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Test retrieval and run RAG"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "90121e56-674b-473b-871d-6e4753fd0c45",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import HTML, display\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def plt_img_base64(img_base64):\n",
|
||||
" # Create an HTML img tag with the base64 string as the source\n",
|
||||
" image_html = f'<img src=\"data:image/jpeg;base64,{img_base64}\" />'\n",
|
||||
"\n",
|
||||
" # Display the image by rendering the HTML\n",
|
||||
" display(HTML(image_html))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"docs = text_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "44eaa532-f035-4c04-b578-02339d42554c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = image_retriever.invoke(\"Women with children\", k=5)\n",
|
||||
"for doc in docs:\n",
|
||||
" if is_base64(doc.page_content):\n",
|
||||
" plt_img_base64(doc.page_content)\n",
|
||||
" else:\n",
|
||||
" print(doc.page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "69fb15fd-76fc-49b4-806d-c4db2990027d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.invoke(\"Women with children\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "227f08b8-e732-4089-b65c-6eb6f9e48f15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see the images retrieved in the LangSmith trace:\n",
|
||||
"\n",
|
||||
"LangSmith [trace](https://smith.langchain.com/public/69c558a5-49dc-4c60-a49b-3adbb70f74c5/r/e872c2c8-528c-468f-aefd-8b5cd730a673)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -6,23 +6,24 @@
|
||||
"source": [
|
||||
"# Oracle AI Vector Search with Document Processing\n",
|
||||
"Oracle AI Vector Search is designed for Artificial Intelligence (AI) workloads that allows you to query data based on semantics, rather than keywords.\n",
|
||||
"One of the biggest benefit of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system. This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
|
||||
"One of the biggest benefits of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system.\n",
|
||||
"This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
|
||||
"\n",
|
||||
"In addition, because Oracle has been building database technologies for so long, your vectors can benefit from all of Oracle Database's most powerful features, like the following:\n",
|
||||
"In addition, your vectors can benefit from all of Oracle Database’s most powerful features, like the following:\n",
|
||||
"\n",
|
||||
" * Partitioning Support\n",
|
||||
" * Real Application Clusters scalability\n",
|
||||
" * Exadata smart scans\n",
|
||||
" * Shard processing across geographically distributed databases\n",
|
||||
" * Transactions\n",
|
||||
" * Parallel SQL\n",
|
||||
" * Disaster recovery\n",
|
||||
" * Security\n",
|
||||
" * Oracle Machine Learning\n",
|
||||
" * Oracle Graph Database\n",
|
||||
" * Oracle Spatial and Graph\n",
|
||||
" * Oracle Blockchain\n",
|
||||
" * JSON\n",
|
||||
" * [Partitioning Support](https://www.oracle.com/database/technologies/partitioning.html)\n",
|
||||
" * [Real Application Clusters scalability](https://www.oracle.com/database/real-application-clusters/)\n",
|
||||
" * [Exadata smart scans](https://www.oracle.com/database/technologies/exadata/software/smartscan/)\n",
|
||||
" * [Shard processing across geographically distributed databases](https://www.oracle.com/database/distributed-database/)\n",
|
||||
" * [Transactions](https://docs.oracle.com/en/database/oracle/oracle-database/23/cncpt/transactions.html)\n",
|
||||
" * [Parallel SQL](https://docs.oracle.com/en/database/oracle/oracle-database/21/vldbg/parallel-exec-intro.html#GUID-D28717E4-0F77-44F5-BB4E-234C31D4E4BA)\n",
|
||||
" * [Disaster recovery](https://www.oracle.com/database/data-guard/)\n",
|
||||
" * [Security](https://www.oracle.com/security/database-security/)\n",
|
||||
" * [Oracle Machine Learning](https://www.oracle.com/artificial-intelligence/database-machine-learning/)\n",
|
||||
" * [Oracle Graph Database](https://www.oracle.com/database/integrated-graph-database/)\n",
|
||||
" * [Oracle Spatial and Graph](https://www.oracle.com/database/spatial/)\n",
|
||||
" * [Oracle Blockchain](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_blockchain_table.html#GUID-B469E277-978E-4378-A8C1-26D3FF96C9A6)\n",
|
||||
" * [JSON](https://docs.oracle.com/en/database/oracle/oracle-database/23/adjsn/json-in-oracle-database.html)\n",
|
||||
"\n",
|
||||
"This guide demonstrates how Oracle AI Vector Search can be used with Langchain to serve an end-to-end RAG pipeline. This guide goes through examples of:\n",
|
||||
"\n",
|
||||
@@ -33,6 +34,13 @@
|
||||
" * Storing and Indexing them in a Vector Store and querying them for queries in OracleVS"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you are just starting with Oracle Database, consider exploring the [free Oracle 23 AI](https://www.oracle.com/database/free/#resources) which provides a great introduction to setting up your database environment. While working with the database, it is often advisable to avoid using the system user by default; instead, you can create your own user for enhanced security and customization. For detailed steps on user creation, refer to our [end-to-end guide](https://github.com/langchain-ai/langchain/blob/master/cookbook/oracleai_demo.ipynb) which also shows how to set up a user in Oracle. Additionally, understanding user privileges is crucial for managing database security effectively. You can learn more about this topic in the official [Oracle guide](https://docs.oracle.com/en/database/oracle/oracle-database/19/admqs/administering-user-accounts-and-security.html#GUID-36B21D72-1BBB-46C9-A0C9-F0D2A8591B8D) on administering user accounts and security."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -78,8 +86,7 @@
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# please update with your username, password, hostname and service_name\n",
|
||||
"# please make sure this user has sufficient privileges to perform all below\n",
|
||||
"# Update with your username, password, hostname, and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
@@ -89,40 +96,45 @@
|
||||
" print(\"Connection successful!\")\n",
|
||||
"\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error setting up user.');\n",
|
||||
" end;\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
"\n",
|
||||
" -- network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db));\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" cursor.close()\n",
|
||||
" try:\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- Drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error dropping user: ' || SQLERRM);\n",
|
||||
" end;\n",
|
||||
" \n",
|
||||
" -- Create user and grant privileges\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
" \n",
|
||||
" -- Network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db)\n",
|
||||
" );\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"User setup failed with error: {e}\")\n",
|
||||
" finally:\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"User setup failed!\")\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
" print(f\"Connection failed with error: {e}\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
@@ -131,13 +143,13 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Process Documents using Oracle AI\n",
|
||||
"Let's think about a scenario that the users have some documents in Oracle Database or in a file system. They want to use the data for Oracle AI Vector Search using Langchain.\n",
|
||||
"Consider the following scenario: users possess documents stored either in an Oracle Database or a file system and intend to utilize this data with Oracle AI Vector Search powered by Langchain.\n",
|
||||
"\n",
|
||||
"For that, the users need to do some document preprocessing. The first step would be to read the documents, generate their summary(if needed) and then chunk/split them if needed. After that, they need to generate the embeddings for those chunks and store into Oracle AI Vector Store. Finally, the users will perform some semantic queries on those data. \n",
|
||||
"To prepare the documents for analysis, a comprehensive preprocessing workflow is necessary. Initially, the documents must be retrieved, summarized (if required), and chunked as needed. Subsequent steps involve generating embeddings for these chunks and integrating them into the Oracle AI Vector Store. Users can then conduct semantic searches on this data.\n",
|
||||
"\n",
|
||||
"Oracle AI Vector Search Langchain library provides a range of document processing functionalities including document loading, splitting, generating summary and embeddings.\n",
|
||||
"The Oracle AI Vector Search Langchain library encompasses a suite of document processing tools that facilitate document loading, chunking, summary generation, and embedding creation.\n",
|
||||
"\n",
|
||||
"In the following sections, we will go through how to use Oracle AI Langchain APIs to achieve each of these functionalities individually. "
|
||||
"In the sections that follow, we will detail the utilization of Oracle AI Langchain APIs to effectively implement each of these processes."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -145,7 +157,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Connect to Demo User\n",
|
||||
"The following sample code will show how to connect to Oracle Database. "
|
||||
"The following sample code will show how to connect to Oracle Database. By default, python-oracledb runs in a ‘Thin’ mode which connects directly to Oracle Database. This mode does not need Oracle Client libraries. However, some additional functionality is available when python-oracledb uses them. Python-oracledb is said to be in ‘Thick’ mode when Oracle Client libraries are used. Both modes have comprehensive functionality supporting the Python Database API v2.0 Specification. See the following [guide](https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_a.html#featuresummary) that talks about features supported in each mode. You might want to switch to thick-mode if you are unable to use thin-mode."
|
||||
]
|
||||
},
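As a rough illustration (reusing the `username`, `password`, and `dsn` values defined above), switching between the two python-oracledb modes can look like this:

```python
import oracledb

# Thin mode (the default): connects directly to Oracle Database,
# no Oracle Client libraries required.
conn = oracledb.connect(user=username, password=password, dsn=dsn)

# Thick mode: call init_oracle_client() once before connecting if you need
# features that are only available through the Oracle Client libraries.
# oracledb.init_oracle_client()  # optionally pass lib_dir="/path/to/instantclient"
```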
|
||||
{
|
||||
@@ -242,9 +254,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Now that we have a demo user and a demo table with some data, we just need to do one more setup. For embedding and summary, we have a few provider options that the users can choose from such as database, 3rd party providers like ocigenai, huggingface, openai, etc. If the users choose to use 3rd party provider, they need to create a credential with corresponding authentication information. On the other hand, if the users choose to use 'database' as provider, they need to load an onnx model to Oracle Database for embeddings; however, for summary, they don't need to do anything."
|
||||
"With the inclusion of a demo user and a populated sample table, the remaining configuration involves setting up embedding and summary functionalities. Users are presented with multiple provider options, including local database solutions and third-party services such as Ocigenai, Hugging Face, and OpenAI. Should users opt for a third-party provider, they are required to establish credentials containing the necessary authentication details. Conversely, if selecting a database as the provider for embeddings, it is necessary to upload an ONNX model to the Oracle Database. No additional setup is required for summary functionalities when using the database option."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -253,13 +263,13 @@
|
||||
"source": [
|
||||
"### Load ONNX Model\n",
|
||||
"\n",
|
||||
"To generate embeddings, Oracle provides a few provider options for users to choose from. The users can choose 'database' provider or some 3rd party providers like OCIGENAI, HuggingFace, etc.\n",
|
||||
"Oracle accommodates a variety of embedding providers, enabling users to choose between proprietary database solutions and third-party services such as OCIGENAI and HuggingFace. This selection dictates the methodology for generating and managing embeddings.\n",
|
||||
"\n",
|
||||
"***Note*** If the users choose database option, they need to load an ONNX model to Oracle Database. The users do not need to load an ONNX model to Oracle Database if they choose to use 3rd party provider to generate embeddings.\n",
|
||||
"***Important*** : Should users opt for the database option, they must upload an ONNX model into the Oracle Database. Conversely, if a third-party provider is selected for embedding generation, uploading an ONNX model to Oracle Database is not required.\n",
|
||||
"\n",
|
||||
"One of the core benefits of using an ONNX model is that the users do not need to transfer their data to 3rd party to generate embeddings. And also, since it does not involve any network or REST API calls, it may provide better performance.\n",
|
||||
"A significant advantage of utilizing an ONNX model directly within Oracle is the enhanced security and performance it offers by eliminating the need to transmit data to external parties. Additionally, this method avoids the latency typically associated with network or REST API calls.\n",
|
||||
"\n",
|
||||
"Here is the sample code to load an ONNX model to Oracle Database:"
|
||||
"Below is the example code to upload an ONNX model into Oracle Database:"
|
||||
]
|
||||
},
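A minimal sketch of that step using the `OracleEmbeddings` utility from `langchain_community`; the directory, ONNX file, and model names below are placeholders, and the exact signature should be checked against the guide linked above:

```python
from langchain_community.embeddings.oracleai import OracleEmbeddings

# Placeholder values: DEMO_PY_DIR is the database directory created during user
# setup; the ONNX file name and model name are examples only.
onnx_dir = "DEMO_PY_DIR"
onnx_file = "tinybert.onnx"
model_name = "demo_model"

try:
    # Load the ONNX embedding model into the connected Oracle Database.
    OracleEmbeddings.load_onnx_model(conn, onnx_dir, onnx_file, model_name)
    print("ONNX model loaded.")
except Exception as e:
    print(f"ONNX model loading failed: {e}")
```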
|
||||
{
|
||||
@@ -298,11 +308,11 @@
|
||||
"source": [
|
||||
"### Create Credential\n",
|
||||
"\n",
|
||||
"On the other hand, if the users choose to use 3rd party provider to generate embeddings and summary, they need to create credential to access 3rd party provider's end points.\n",
|
||||
"When selecting third-party providers for generating embeddings, users are required to establish credentials to securely access the provider's endpoints.\n",
|
||||
"\n",
|
||||
"***Note:*** The users do not need to create any credential if they choose to use 'database' provider to generate embeddings and summary. Should the users choose to 3rd party provider, they need to create credential for the 3rd party provider they want to use. \n",
|
||||
"***Important:*** No credentials are necessary when opting for the 'database' provider to generate embeddings. However, should users decide to utilize a third-party provider, they must create credentials specific to the chosen provider.\n",
|
||||
"\n",
|
||||
"Here is a sample example:"
|
||||
"Below is an illustrative example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -352,11 +362,11 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Documents\n",
|
||||
"The users can load the documents from Oracle Database or a file system or both. They just need to set the loader parameters accordingly. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
|
||||
"Users have the flexibility to load documents from either the Oracle Database, a file system, or both, by appropriately configuring the loader parameters. For comprehensive details on these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-73397E89-92FB-48ED-94BB-1AD960C4EA1F).\n",
|
||||
"\n",
|
||||
"The main benefit of using OracleDocLoader is that it can handle 150+ different file formats. You don't need to use different types of loader for different file formats. Here is the list formats that we support: [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html)\n",
|
||||
"A significant advantage of utilizing OracleDocLoader is its capability to process over 150 distinct file formats, eliminating the need for multiple loaders for different document types. For a complete list of the supported formats, please refer to the [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html).\n",
|
||||
"\n",
|
||||
"The following sample code will show how to do that:"
|
||||
"Below is a sample code snippet that demonstrates how to use OracleDocLoader"
|
||||
]
|
||||
},
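For illustration, a minimal `OracleDocLoader` sketch might look like the following; the owner, table, and column names are placeholders, and the parameter keys should be verified against the guide linked above:

```python
from langchain_community.document_loaders.oracleai import OracleDocLoader

# Placeholder loader parameters: read rows from testuser.demo_tab, column "data".
loader_params = {"owner": "testuser", "tablename": "demo_tab", "colname": "data"}

loader = OracleDocLoader(conn=conn, params=loader_params)
docs = loader.load()
print(f"Number of documents loaded: {len(docs)}")
```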
|
||||
{
|
||||
@@ -399,7 +409,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate Summary\n",
|
||||
"Now that the user loaded the documents, they may want to generate a summary for each document. The Oracle AI Vector Search Langchain library provides an API to do that. There are a few summary generation provider options including Database, OCIGENAI, HuggingFace and so on. The users can choose their preferred provider to generate a summary. Like before, they just need to set the summary parameters accordingly. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters."
|
||||
"Now that the user loaded the documents, they may want to generate a summary for each document. The Oracle AI Vector Search Langchain library offers a suite of APIs designed for document summarization. It supports multiple summarization providers such as Database, OCIGENAI, HuggingFace, among others, allowing users to select the provider that best meets their needs. To utilize these capabilities, users must configure the summary parameters as specified. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-EC9DDB58-6A15-4B36-BA66-ECBA20D2CE57)."
|
||||
]
|
||||
},
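As an illustrative sketch of summarization with the in-database provider (the `provider` and `glevel` parameter values are assumptions to verify against the guide):

```python
from langchain_community.utilities.oracleai import OracleSummary

# Placeholder summary parameters: in-database provider, sentence-level output.
summary_params = {"provider": "database", "glevel": "S"}

summ = OracleSummary(conn=conn, params=summary_params)
summaries = [summ.get_summary(doc.page_content) for doc in docs]
print(f"Number of summaries generated: {len(summaries)}")
```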
|
||||
{
|
||||
@@ -470,9 +480,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Split Documents\n",
|
||||
"The documents can be in different sizes: small, medium, large, or very large. The users like to split/chunk their documents into smaller pieces to generate embeddings. There are lots of different splitting customizations the users can do. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
|
||||
"The documents may vary in size, ranging from small to very large. Users often prefer to chunk their documents into smaller sections to facilitate the generation of embeddings. A wide array of customization options is available for this splitting process. For comprehensive details regarding these parameters, please consult the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-4E145629-7098-4C7C-804F-FC85D1F24240).\n",
|
||||
"\n",
|
||||
"The following sample code will show how to do that:"
|
||||
"Below is a sample code illustrating how to implement this:"
|
||||
]
|
||||
},
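A minimal sketch of chunking with `OracleTextSplitter`, reusing the documents loaded earlier; the `normalize` parameter value is an illustrative assumption:

```python
from langchain_community.document_loaders.oracleai import OracleTextSplitter

# Placeholder splitter parameters; see the guide for the full set of options.
splitter_params = {"normalize": "all"}

splitter = OracleTextSplitter(conn=conn, params=splitter_params)
chunks = []
for doc in docs:
    chunks.extend(splitter.split_text(doc.page_content))
print(f"Number of chunks produced: {len(chunks)}")
```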
|
||||
{
|
||||
@@ -513,14 +523,14 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate Embeddings\n",
|
||||
"Now that the documents are chunked as per requirements, the users may want to generate embeddings for these chunks. Oracle AI Vector Search provides a number of ways to generate embeddings. The users can load an ONNX embedding model to Oracle Database and use it to generate embeddings or use some 3rd party API's end points to generate embeddings. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters."
|
||||
"Now that the documents are chunked as per requirements, the users may want to generate embeddings for these chunks. Oracle AI Vector Search provides multiple methods for generating embeddings, utilizing either locally hosted ONNX models or third-party APIs. For comprehensive instructions on configuring these alternatives, please refer to the [Oracle AI Vector Search Guide](https://docs.oracle.com/en/database/oracle/oracle-database/23/arpls/dbms_vector_chain1.html#GUID-C6439E94-4E86-4ECD-954E-4B73D53579DE)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** The users may need to set proxy if they want to use some 3rd party embedding generation providers other than 'database' provider (aka using ONNX model)."
|
||||
"***Note:*** Users may need to configure a proxy to utilize third-party embedding generation providers, excluding the 'database' provider that utilizes an ONNX model."
|
||||
]
|
||||
},
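A minimal sketch of generating embeddings with the in-database ONNX model loaded earlier; the proxy usage mentioned in the comment is an assumption to verify if a third-party provider is chosen:

```python
from langchain_community.embeddings.oracleai import OracleEmbeddings

# In-database provider using the ONNX model loaded earlier (no proxy needed).
embedder_params = {"provider": "database", "model": "demo_model"}

# (Assumption) For a third-party provider behind a corporate firewall, a proxy
# can typically be supplied when constructing the embedder, e.g.
# OracleEmbeddings(conn=conn, params=..., proxy="www-proxy.example.com:80").
embedder = OracleEmbeddings(conn=conn, params=embedder_params)

vector = embedder.embed_query("What is Oracle AI Vector Search?")
print(f"Embedding dimension: {len(vector)}")
```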
|
||||
{
|
||||
@@ -752,20 +762,18 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above example creates a vector store with DOT_PRODUCT distance strategy. \n",
|
||||
"\n",
|
||||
"However, the users can create Oracle AI Vector Store provides different distance strategies. Please see the [comprehensive guide](/docs/integrations/vectorstores/oracle) for more information."
|
||||
"The example provided illustrates the creation of a vector store using the DOT_PRODUCT distance strategy. Users have the flexibility to employ various distance strategies with the Oracle AI Vector Store, as detailed in our [comprehensive guide](https://python.langchain.com/v0.1/docs/integrations/vectorstores/oracle/)."
|
||||
]
|
||||
},
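For example, a sketch of building a second vector store with a cosine distance strategy instead; the table name is a placeholder, and in practice you would typically store the chunked documents rather than the raw ones:

```python
from langchain_community.vectorstores.oraclevs import OracleVS
from langchain_community.vectorstores.utils import DistanceStrategy

# Same documents and embedder as above, but with COSINE instead of DOT_PRODUCT.
vector_store_cos = OracleVS.from_documents(
    docs,
    embedder,
    client=conn,
    table_name="oravs_cosine",
    distance_strategy=DistanceStrategy.COSINE,
)
```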
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that we have embeddings stored in vector stores, let's create an index on them to get better semantic search performance during query time.\n",
|
||||
"With embeddings now stored in vector stores, it is advisable to establish an index to enhance semantic search performance during query execution.\n",
|
||||
"\n",
|
||||
"***Note*** If you are getting some insufficient memory error, please increase ***vector_memory_size*** in your database.\n",
|
||||
"***Note*** Should you encounter an \"insufficient memory\" error, it is recommended to increase the ***vector_memory_size*** in your database configuration\n",
|
||||
"\n",
|
||||
"Here is the sample code to create an index:"
|
||||
"Below is a sample code snippet for creating an index:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -785,9 +793,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above example creates a default HNSW index on the embeddings stored in 'oravs' table. The users can set different parameters as per their requirements. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
|
||||
"This example demonstrates the creation of a default HNSW index on embeddings within the 'oravs' table. Users may adjust various parameters according to their specific needs. For detailed information on these parameters, please consult the [Oracle AI Vector Search Guide book](https://docs.oracle.com/en/database/oracle/oracle-database/23/vecse/manage-different-categories-vector-indexes.html).\n",
|
||||
"\n",
|
||||
"Also, there are different types of vector indices that the users can create. Please see the [comprehensive guide](/docs/integrations/vectorstores/oracle) for more information.\n"
|
||||
"Additionally, various types of vector indices can be created to meet diverse requirements. More details can be found in our [comprehensive guide](https://python.langchain.com/v0.1/docs/integrations/vectorstores/oracle/).\n"
|
||||
]
|
||||
},
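As a hedged sketch, an IVF index with explicit parameters could be created roughly as follows; the parameter names and values are illustrative only and should be checked against the vector index documentation linked above:

```python
from langchain_community.vectorstores import oraclevs

# Create an IVF index on the vector store built in the previous step,
# instead of the default HNSW index.
oraclevs.create_index(
    conn,
    vector_store_cos,
    params={"idx_name": "ivf_idx1", "idx_type": "IVF", "neighbor_part": 64},
)
```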
|
||||
{
|
||||
@@ -797,9 +805,9 @@
|
||||
"## Perform Semantic Search\n",
|
||||
"All set!\n",
|
||||
"\n",
|
||||
"We have processed the documents, stored them to vector store, and then created index to get better query performance. Now let's do some semantic searches.\n",
|
||||
"We have successfully processed the documents and stored them in the vector store, followed by the creation of an index to enhance query performance. We are now prepared to proceed with semantic searches.\n",
|
||||
"\n",
|
||||
"Here is the sample code for this:"
|
||||
"Below is the sample code for this process:"
|
||||
]
|
||||
},
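A minimal sketch of querying the vector store built above:

```python
# Query the stored chunks; k controls how many results come back.
query = "What is Oracle AI Vector Search?"
results = vector_store_cos.similarity_search(query, k=3)

for doc in results:
    print(doc.page_content[:100])
```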
|
||||
{
|
||||
|
||||
@@ -36,7 +36,9 @@
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"vectorstore = DocArrayInMemorySearch.from_documents(docs, embedding=UpstageEmbeddings())\n",
|
||||
"vectorstore = DocArrayInMemorySearch.from_documents(\n",
|
||||
" docs, embedding=UpstageEmbeddings(model=\"solar-embedding-1-large\")\n",
|
||||
")\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
|
||||
@@ -39,12 +39,10 @@
|
||||
"from langchain_community.document_loaders.recursive_url_loader import (\n",
|
||||
" RecursiveUrlLoader,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# noqa\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"# For our example, we'll load docs from the web\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"DOCSTORE_DIR = \".\"\n",
|
||||
"DOCSTORE_ID_KEY = \"doc_id\""
|
||||
|
||||
@@ -13,7 +13,7 @@ OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
|
||||
|
||||
PYTHON = .venv/bin/python
|
||||
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm|ai21" | tr '\n' ' ')
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm" | tr '\n' ' ')
|
||||
|
||||
PORT ?= 3001
|
||||
|
||||
@@ -35,19 +35,16 @@ generate-files:
|
||||
mkdir -p $(INTERMEDIATE_DIR)
|
||||
cp -r $(SOURCE_DIR)/* $(INTERMEDIATE_DIR)
|
||||
mkdir -p $(INTERMEDIATE_DIR)/templates
|
||||
cp ../templates/docs/INDEX.md $(INTERMEDIATE_DIR)/templates/index.md
|
||||
cp ../cookbook/README.md $(INTERMEDIATE_DIR)/cookbook.mdx
|
||||
|
||||
$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/document_loader_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
|
||||
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O $(INTERMEDIATE_DIR)/langserve.md
|
||||
$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langserve.md https://github.com/langchain-ai/langserve/tree/main/
|
||||
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langgraph/main/README.md -O $(INTERMEDIATE_DIR)/langgraph.md
|
||||
$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langgraph.md https://github.com/langchain-ai/langgraph/tree/main/
|
||||
|
||||
copy-infra:
|
||||
mkdir -p $(OUTPUT_NEW_DIR)
|
||||
cp -r src $(OUTPUT_NEW_DIR)
|
||||
@@ -69,9 +66,9 @@ md-sync:
|
||||
generate-references:
|
||||
$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
build: install-py-deps generate-files copy-infra render md-sync generate-references
|
||||
build: install-py-deps generate-files copy-infra render md-sync
|
||||
|
||||
vercel-build: install-vercel-deps build
|
||||
vercel-build: install-vercel-deps build generate-references
|
||||
rm -rf docs
|
||||
mv $(OUTPUT_NEW_DOCS_DIR) docs
|
||||
rm -rf build
|
||||
|
||||
@@ -10,12 +10,21 @@ from pathlib import Path
|
||||
from typing import Dict, List, Literal, Optional, Sequence, TypedDict, Union
|
||||
|
||||
import toml
|
||||
import typing_extensions
|
||||
from langchain_core.runnables import Runnable, RunnableSerializable
|
||||
from pydantic import BaseModel
|
||||
|
||||
ROOT_DIR = Path(__file__).parents[2].absolute()
|
||||
HERE = Path(__file__).parent
|
||||
|
||||
ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]
|
||||
ClassKind = Literal[
|
||||
"TypedDict",
|
||||
"Regular",
|
||||
"Pydantic",
|
||||
"enum",
|
||||
"RunnablePydantic",
|
||||
"RunnableNonPydantic",
|
||||
]
|
||||
|
||||
|
||||
class ClassInfo(TypedDict):
|
||||
@@ -69,8 +78,36 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
|
||||
continue
|
||||
|
||||
if inspect.isclass(type_):
|
||||
if type(type_) == typing._TypedDictMeta: # type: ignore
|
||||
# The classification of the class is used to select a template
|
||||
# for the object when rendering the documentation.
|
||||
# See `templates` directory for defined templates.
|
||||
# This is a hacky solution to distinguish between different
|
||||
# kinds of things that we want to render.
|
||||
if type(type_) is typing_extensions._TypedDictMeta: # type: ignore
|
||||
kind: ClassKind = "TypedDict"
|
||||
elif type(type_) is typing._TypedDictMeta: # type: ignore
|
||||
kind: ClassKind = "TypedDict"
|
||||
elif (
|
||||
issubclass(type_, Runnable)
|
||||
and issubclass(type_, BaseModel)
|
||||
and type_ is not Runnable
|
||||
):
|
||||
# RunnableSerializable subclasses from Pydantic,
|
||||
# for which we use autodoc_pydantic for rendering.
|
||||
# We need to distinguish these from regular Pydantic
|
||||
# classes so we can hide inherited Runnable methods
|
||||
# and provide a link to the Runnable interface from
|
||||
# the template.
|
||||
kind = "RunnablePydantic"
|
||||
elif (
|
||||
issubclass(type_, Runnable)
|
||||
and not issubclass(type_, BaseModel)
|
||||
and type_ is not Runnable
|
||||
):
|
||||
# These are not pydantic classes but are Runnable.
|
||||
# We'll hide all the inherited methods from Runnable
|
||||
# but use a regular class template to render.
|
||||
kind = "RunnableNonPydantic"
|
||||
elif issubclass(type_, Enum):
|
||||
kind = "enum"
|
||||
elif issubclass(type_, BaseModel):
|
||||
@@ -128,11 +165,11 @@ def _load_package_modules(
|
||||
of the modules/packages are part of the package vs. 3rd party or built-in.
|
||||
|
||||
Parameters:
|
||||
package_directory: Path to the package directory.
|
||||
submodule: Optional name of submodule to load.
|
||||
package_directory (Union[str, Path]): Path to the package directory.
|
||||
submodule (Optional[str]): Optional name of submodule to load.
|
||||
|
||||
Returns:
|
||||
list: A list of loaded module objects.
|
||||
Dict[str, ModuleMembers]: A dictionary where keys are module names and values are ModuleMembers objects.
|
||||
"""
|
||||
package_path = (
|
||||
Path(package_directory)
|
||||
@@ -187,7 +224,7 @@ def _load_package_modules(
|
||||
modules_by_namespace[top_namespace] = _module_members
|
||||
|
||||
except ImportError as e:
|
||||
print(f"Error: Unable to import module '{namespace}' with error: {e}") # noqa: T201
|
||||
print(f"Error: Unable to import module '{namespace}' with error: {e}")
|
||||
|
||||
return modules_by_namespace
|
||||
|
||||
@@ -251,6 +288,10 @@ Classes
|
||||
template = "enum.rst"
|
||||
elif class_["kind"] == "Pydantic":
|
||||
template = "pydantic.rst"
|
||||
elif class_["kind"] == "RunnablePydantic":
|
||||
template = "runnable_pydantic.rst"
|
||||
elif class_["kind"] == "RunnableNonPydantic":
|
||||
template = "runnable_non_pydantic.rst"
|
||||
else:
|
||||
template = "class.rst"
|
||||
|
||||
@@ -364,7 +405,7 @@ def main(dirs: Optional[list] = None) -> None:
|
||||
dirs += [
|
||||
dir_
|
||||
for dir_ in os.listdir(ROOT_DIR / "libs" / "partners")
|
||||
if os.path.isdir(dir_)
|
||||
if os.path.isdir(ROOT_DIR / "libs" / "partners" / dir_)
|
||||
and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / "partners" / dir_)
|
||||
]
|
||||
for dir_ in dirs:
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -33,4 +33,4 @@
|
||||
{% endblock %}
|
||||
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
.. example_links:: {{ objname }}
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
:member-order: groupwise
|
||||
:show-inheritance: True
|
||||
:special-members: __call__
|
||||
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
|
||||
|
||||
|
||||
{% block attributes %}
|
||||
{% endblock %}
|
||||
|
||||
docs/api_reference/templates/runnable_non_pydantic.rst (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
:mod:`{{module}}`.{{objname}}
|
||||
{{ underline }}==============
|
||||
|
||||
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
|
||||
|
||||
|
||||
.. currentmodule:: {{ module }}
|
||||
|
||||
.. autoclass:: {{ objname }}
|
||||
|
||||
{% block attributes %}
|
||||
{% if attributes %}
|
||||
.. rubric:: {{ _('Attributes') }}
|
||||
|
||||
.. autosummary::
|
||||
{% for item in attributes %}
|
||||
~{{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
{% endif %}
|
||||
{% endblock %}
|
||||
|
||||
{% block methods %}
|
||||
{% if methods %}
|
||||
.. rubric:: {{ _('Methods') }}
|
||||
|
||||
.. autosummary::
|
||||
{% for item in methods %}
|
||||
~{{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
|
||||
{% for item in methods %}
|
||||
.. automethod:: {{ name }}.{{ item }}
|
||||
{%- endfor %}
|
||||
|
||||
{% endif %}
|
||||
{% endblock %}
|
||||
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
docs/api_reference/templates/runnable_pydantic.rst (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
:mod:`{{module}}`.{{objname}}
|
||||
{{ underline }}==============
|
||||
|
||||
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃
|
||||
|
||||
.. currentmodule:: {{ module }}
|
||||
|
||||
.. autopydantic_model:: {{ objname }}
|
||||
:model-show-json: False
|
||||
:model-show-config-summary: False
|
||||
:model-show-validator-members: False
|
||||
:model-show-field-summary: False
|
||||
:field-signature-prefix: param
|
||||
:members:
|
||||
:undoc-members:
|
||||
:inherited-members:
|
||||
:member-order: groupwise
|
||||
:show-inheritance: True
|
||||
:special-members: __call__
|
||||
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, invoke, ainvoke, batch, abatch, batch_as_completed, abatch_as_completed, astream_log, stream, astream, astream_events, transform, atransform, get_output_schema, get_prompts, configurable_fields, configurable_alternatives, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign
|
||||
|
||||
.. example_links:: {{ objname }}
|
||||
@@ -2,132 +2,129 @@
|
||||
{%- set url_root = pathto('', 1) %}
|
||||
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
|
||||
{%- if not embedded and docstitle %}
|
||||
{%- set titlesuffix = " — "|safe + docstitle|e %}
|
||||
{%- set titlesuffix = " — "|safe + docstitle|e %}
|
||||
{%- else %}
|
||||
{%- set titlesuffix = "" %}
|
||||
{%- set titlesuffix = "" %}
|
||||
{%- endif %}
|
||||
{%- set lang_attr = 'en' %}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
|
||||
<!--[if gt IE 8]><!--> <html class="no-js" lang="{{ lang_attr }}" > <!--<![endif]-->
|
||||
<!--[if gt IE 8]><!-->
|
||||
<html class="no-js" lang="{{ lang_attr }}"> <!--<![endif]-->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
{{ metatags }}
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta charset="utf-8">
|
||||
{{ metatags }}
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
|
||||
{% block htmltitle %}
|
||||
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
|
||||
{% endblock %}
|
||||
<link rel="canonical" href="https://api.python.langchain.com/en/latest/{{pagename}}.html" />
|
||||
{% block htmltitle %}
|
||||
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
|
||||
{% endblock %}
|
||||
<link rel="canonical"
|
||||
href="https://api.python.langchain.com/en/latest/{{ pagename }}.html"/>
|
||||
|
||||
{% if favicon_url %}
|
||||
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
|
||||
{% endif %}
|
||||
{% if favicon_url %}
|
||||
<link rel="shortcut icon" href="{{ favicon_url|e }}"/>
|
||||
{% endif %}
|
||||
|
||||
<link rel="stylesheet" href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}" type="text/css" />
|
||||
{%- for css in css_files %}
|
||||
{%- if css|attr("rel") %}
|
||||
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
|
||||
{%- else %}
|
||||
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
|
||||
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
|
||||
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
|
||||
{%- block extrahead %} {% endblock %}
|
||||
<link rel="stylesheet"
|
||||
href="{{ pathto('_static/css/vendor/bootstrap.min.css', 1) }}"
|
||||
type="text/css"/>
|
||||
{%- for css in css_files %}
|
||||
{%- if css|attr("rel") %}
|
||||
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}"
|
||||
type="text/css"{% if css.title is not none %}
|
||||
title="{{ css.title }}"{% endif %} />
|
||||
{%- else %}
|
||||
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css"/>
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css"/>
|
||||
<script id="documentation_options" data-url_root="{{ pathto('', 1) }}"
|
||||
src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
|
||||
<script src="{{ pathto('_static/jquery.js', 1) }}"></script>
|
||||
{%- block extrahead %} {% endblock %}
|
||||
</head>
|
||||
<body>
|
||||
{% include "nav.html" %}
|
||||
{%- block content %}
|
||||
<div class="d-flex" id="sk-doc-wrapper">
|
||||
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
|
||||
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary" for="sk-toggle-checkbox">Toggle Menu</label>
|
||||
<div id="sk-sidebar-wrapper" class="border-right">
|
||||
<div class="sk-sidebar-toc-wrapper">
|
||||
<div class="btn-group w-100 mb-2" role="group" aria-label="rellinks">
|
||||
{%- if prev %}
|
||||
<a href="{{ prev.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ prev.title|striptags }}">Prev</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Prev</a>
|
||||
{%- endif %}
|
||||
{%- if parents -%}
|
||||
<a href="{{ parents[-1].link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ parents[-1].title|striptags }}">Up</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink disabled py-1">Up</a>
|
||||
{%- endif %}
|
||||
{%- if next %}
|
||||
<a href="{{ next.link|e }}" role="button" class="btn sk-btn-rellink py-1" sk-rellink-tooltip="{{ next.title|striptags }}">Next</a>
|
||||
{%- else %}
|
||||
<a href="#" role="button" class="btn sk-btn-rellink py-1 disabled"">Next</a>
|
||||
{%- endif %}
|
||||
<div class="d-flex" id="sk-doc-wrapper">
|
||||
<input type="checkbox" name="sk-toggle-checkbox" id="sk-toggle-checkbox">
|
||||
<label id="sk-sidemenu-toggle" class="sk-btn-toggle-toc btn sk-btn-primary"
|
||||
for="sk-toggle-checkbox">Toggle Menu</label>
|
||||
<div id="sk-sidebar-wrapper" class="border-right">
|
||||
<div class="sk-sidebar-toc-wrapper">
|
||||
{%- if meta and meta['parenttoc']|tobool %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
|
||||
<ul>
|
||||
{% for main_nav_item in nav %}
|
||||
{% if main_nav_item.active %}
|
||||
<li>
|
||||
<a href="{{ main_nav_item.url }}"
|
||||
class="sk-toc-active">{{ main_nav_item.title }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
{% for nav_item in main_nav_item.children %}
|
||||
<li>
|
||||
<a href="{{ nav_item.url }}"
|
||||
class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
|
||||
{% if nav_item.children %}
|
||||
<ul>
|
||||
{% for inner_child in nav_item.children %}
|
||||
<li class="sk-toctree-l3">
|
||||
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
{%- elif meta and meta['globalsidebartoc']|tobool %}
|
||||
<div class="sk-sidebar-toc sk-sidebar-global-toc">
|
||||
{{ toctree(maxdepth=2, titles_only=True) }}
|
||||
</div>
|
||||
{%- else %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{{ toc }}
|
||||
</div>
|
||||
{%- endif %}
|
||||
</div>
|
||||
</div>
|
||||
{%- if meta and meta['parenttoc']|tobool %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{% set nav = get_nav_object(maxdepth=3, collapse=True, numbered=True) %}
|
||||
<ul>
|
||||
{% for main_nav_item in nav %}
|
||||
{% if main_nav_item.active %}
|
||||
<li>
|
||||
<a href="{{ main_nav_item.url }}" class="sk-toc-active">{{ main_nav_item.title }}</a>
|
||||
</li>
|
||||
<ul>
|
||||
{% for nav_item in main_nav_item.children %}
|
||||
<li>
|
||||
<a href="{{ nav_item.url }}" class="{% if nav_item.active %}sk-toc-active{% endif %}">{{ nav_item.title }}</a>
|
||||
{% if nav_item.children %}
|
||||
<ul>
|
||||
{% for inner_child in nav_item.children %}
|
||||
<li class="sk-toctree-l3">
|
||||
<a href="{{ inner_child.url }}">{{ inner_child.title }}</a>
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
<div id="sk-page-content-wrapper">
|
||||
<div class="sk-page-content container-fluid body px-md-3" role="main">
|
||||
{% block body %}{% endblock %}
|
||||
</div>
|
||||
{%- elif meta and meta['globalsidebartoc']|tobool %}
|
||||
<div class="sk-sidebar-toc sk-sidebar-global-toc">
|
||||
{{ toctree(maxdepth=2, titles_only=True) }}
|
||||
<div class="container">
|
||||
<footer class="sk-content-footer">
|
||||
{%- if pagename != 'index' %}
|
||||
{%- if show_copyright %}
|
||||
{%- if hasdoc('copyright') %}
|
||||
{% trans path=pathto('copyright'), copyright=copyright|e %}
|
||||
© {{ copyright }}.{% endtrans %}
|
||||
{%- else %}
|
||||
{% trans copyright=copyright|e %}© {{ copyright }}
|
||||
.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if last_updated %}
|
||||
{% trans last_updated=last_updated|e %}Last updated
|
||||
on {{ last_updated }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- if show_source and has_source and sourcename %}
|
||||
<a href="{{ pathto('_sources/' + sourcename, true)|e }}"
|
||||
rel="nofollow">{{ _('Show this page source') }}</a>
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
</footer>
|
||||
</div>
|
||||
{%- else %}
|
||||
<div class="sk-sidebar-toc">
|
||||
{{ toc }}
|
||||
</div>
|
||||
{%- endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="sk-page-content-wrapper">
|
||||
<div class="sk-page-content container-fluid body px-md-3" role="main">
|
||||
{% block body %}{% endblock %}
|
||||
</div>
|
||||
<div class="container">
|
||||
<footer class="sk-content-footer">
|
||||
{%- if pagename != 'index' %}
|
||||
{%- if show_copyright %}
|
||||
{%- if hasdoc('copyright') %}
|
||||
{% trans path=pathto('copyright'), copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
|
||||
{%- else %}
|
||||
{% trans copyright=copyright|e %}© {{ copyright }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if last_updated %}
|
||||
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
|
||||
{%- endif %}
|
||||
{%- if show_source and has_source and sourcename %}
|
||||
<a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow">{{ _('Show this page source') }}</a>
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
</footer>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{%- endblock %}
|
||||
<script src="{{ pathto('_static/js/vendor/bootstrap.min.js', 1) }}"></script>
|
||||
{% include "javascript.html" %}
|
||||
|
||||
docs/data/people.yml (1187 lines; diff suppressed because it is too large)
docs/docs/additional_resources/arxiv_references.mdx (new file, 912 lines)
@@ -0,0 +1,912 @@
|
||||
# arXiv
|
||||
|
||||
LangChain implements the latest research in the field of Natural Language Processing.
|
||||
This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference,
|
||||
Templates, and Cookbooks.
|
||||
|
||||
In the other direction, researchers use LangChain in their work and cite LangChain in their papers.
|
||||
Here you find [such papers](https://arxiv.org/search/?query=langchain&searchtype=all&source=header).
|
||||
|
||||
## Summary
|
||||
|
||||
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
|
||||
|------------------|---------|-------------------|------------------------|
|
||||
| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024-02-06 | `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024-01-31 | `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024-01-29 | `Cookbook:` [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024-01-08 | `Cookbook:` [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023-10-17 | `Cookbook:` [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023-07-18 | `Cookbook:` [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023-04-07 | `Cookbook:` [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022-10-06 | `Docs:` [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)

## Self-Discover: Large Language Models Self-Compose Reasoning Structures
|
||||
|
||||
- **arXiv id:** 2402.03620v1
|
||||
- **Title:** Self-Discover: Large Language Models Self-Compose Reasoning Structures
|
||||
- **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al.
|
||||
- **Published Date:** 2024-02-06
|
||||
- **URL:** http://arxiv.org/abs/2402.03620v1
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
|
||||
|
||||
**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the
|
||||
task-intrinsic reasoning structures to tackle complex reasoning problems that
|
||||
are challenging for typical prompting methods. Core to the framework is a
|
||||
self-discovery process where LLMs select multiple atomic reasoning modules such
|
||||
as critical thinking and step-by-step thinking, and compose them into an
|
||||
explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER
|
||||
substantially improves GPT-4 and PaLM 2's performance on challenging reasoning
|
||||
benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as
|
||||
much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER
|
||||
outperforms inference-intensive methods such as CoT-Self-Consistency by more
|
||||
than 20%, while requiring 10-40x fewer inference compute. Finally, we show that
|
||||
the self-discovered reasoning structures are universally applicable across
|
||||
model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share
|
||||
commonalities with human reasoning patterns.
|
||||
|
||||
## RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
|
||||
|
||||
- **arXiv id:** 2401.18059v1
|
||||
- **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval
|
||||
- **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al.
|
||||
- **Published Date:** 2024-01-31
|
||||
- **URL:** http://arxiv.org/abs/2401.18059v1
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
|
||||
|
||||
**Abstract:** Retrieval-augmented language models can better adapt to changes in world
|
||||
state and incorporate long-tail knowledge. However, most existing methods
|
||||
retrieve only short contiguous chunks from a retrieval corpus, limiting
|
||||
holistic understanding of the overall document context. We introduce the novel
|
||||
approach of recursively embedding, clustering, and summarizing chunks of text,
|
||||
constructing a tree with differing levels of summarization from the bottom up.
|
||||
At inference time, our RAPTOR model retrieves from this tree, integrating
|
||||
information across lengthy documents at different levels of abstraction.
|
||||
Controlled experiments show that retrieval with recursive summaries offers
|
||||
significant improvements over traditional retrieval-augmented LMs on several
|
||||
tasks. On question-answering tasks that involve complex, multi-step reasoning,
|
||||
we show state-of-the-art results; for example, by coupling RAPTOR retrieval
|
||||
with the use of GPT-4, we can improve the best performance on the QuALITY
|
||||
benchmark by 20% in absolute accuracy.
|
||||
|
||||
## Corrective Retrieval Augmented Generation
|
||||
|
||||
- **arXiv id:** 2401.15884v2
|
||||
- **Title:** Corrective Retrieval Augmented Generation
|
||||
- **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al.
|
||||
- **Published Date:** 2024-01-29
|
||||
- **URL:** http://arxiv.org/abs/2401.15884v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
|
||||
|
||||
**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the
|
||||
accuracy of generated texts cannot be secured solely by the parametric
|
||||
knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a
|
||||
practicable complement to LLMs, it relies heavily on the relevance of retrieved
|
||||
documents, raising concerns about how the model behaves if retrieval goes
|
||||
wrong. To this end, we propose the Corrective Retrieval Augmented Generation
|
||||
(CRAG) to improve the robustness of generation. Specifically, a lightweight
|
||||
retrieval evaluator is designed to assess the overall quality of retrieved
|
||||
documents for a query, returning a confidence degree based on which different
|
||||
knowledge retrieval actions can be triggered. Since retrieval from static and
|
||||
limited corpora can only return sub-optimal documents, large-scale web searches
|
||||
are utilized as an extension for augmenting the retrieval results. Besides, a
|
||||
decompose-then-recompose algorithm is designed for retrieved documents to
|
||||
selectively focus on key information and filter out irrelevant information in
|
||||
them. CRAG is plug-and-play and can be seamlessly coupled with various
|
||||
RAG-based approaches. Experiments on four datasets covering short- and
|
||||
long-form generation tasks show that CRAG can significantly improve the
|
||||
performance of RAG-based approaches.
|
||||
|
||||
## Mixtral of Experts
|
||||
|
||||
- **arXiv id:** 2401.04088v1
|
||||
- **Title:** Mixtral of Experts
|
||||
- **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al.
|
||||
- **Published Date:** 2024-01-08
|
||||
- **URL:** http://arxiv.org/abs/2401.04088v1
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
|
||||
|
||||
**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model.
|
||||
Mixtral has the same architecture as Mistral 7B, with the difference that each
|
||||
layer is composed of 8 feedforward blocks (i.e. experts). For every token, at
|
||||
each layer, a router network selects two experts to process the current state
|
||||
and combine their outputs. Even though each token only sees two experts, the
|
||||
selected experts can be different at each timestep. As a result, each token has
|
||||
access to 47B parameters, but only uses 13B active parameters during inference.
|
||||
Mixtral was trained with a context size of 32k tokens and it outperforms or
|
||||
matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular,
|
||||
Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and
|
||||
multilingual benchmarks. We also provide a model fine-tuned to follow
|
||||
instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo,
|
||||
Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both
|
||||
the base and instruct models are released under the Apache 2.0 license.
|
||||
|
||||
## Dense X Retrieval: What Retrieval Granularity Should We Use?
|
||||
|
||||
- **arXiv id:** 2312.06648v2
|
||||
- **Title:** Dense X Retrieval: What Retrieval Granularity Should We Use?
|
||||
- **Authors:** Tong Chen, Hongwei Wang, Sihao Chen, et al.
|
||||
- **Published Date:** 2023-12-11
|
||||
- **URL:** http://arxiv.org/abs/2312.06648v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Template:** [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
|
||||
|
||||
**Abstract:** Dense retrieval has become a prominent method to obtain relevant context or
|
||||
world knowledge in open-domain NLP tasks. When we use a learned dense retriever
|
||||
on a retrieval corpus at inference time, an often-overlooked design choice is
|
||||
the retrieval unit in which the corpus is indexed, e.g. document, passage, or
|
||||
sentence. We discover that the retrieval unit choice significantly impacts the
|
||||
performance of both retrieval and downstream tasks. Distinct from the typical
|
||||
approach of using passages or sentences, we introduce a novel retrieval unit,
|
||||
proposition, for dense retrieval. Propositions are defined as atomic
|
||||
expressions within text, each encapsulating a distinct factoid and presented in
|
||||
a concise, self-contained natural language format. We conduct an empirical
|
||||
comparison of different retrieval granularity. Our results reveal that
|
||||
proposition-based retrieval significantly outperforms traditional passage or
|
||||
sentence-based methods in dense retrieval. Moreover, retrieval by proposition
|
||||
also enhances the performance of downstream QA tasks, since the retrieved texts
|
||||
are more condensed with question-relevant information, reducing the need for
|
||||
lengthy input tokens and minimizing the inclusion of extraneous, irrelevant
|
||||
information.
|
||||
|
||||
## Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
|
||||
|
||||
- **arXiv id:** 2311.09210v1
|
||||
- **Title:** Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models
|
||||
- **Authors:** Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al.
|
||||
- **Published Date:** 2023-11-15
|
||||
- **URL:** http://arxiv.org/abs/2311.09210v1
|
||||
- **LangChain:**
|
||||
|
||||
- **Template:** [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
|
||||
|
||||
**Abstract:** Retrieval-augmented language models (RALMs) represent a substantial
|
||||
advancement in the capabilities of large language models, notably in reducing
|
||||
factual hallucination by leveraging external knowledge sources. However, the
|
||||
reliability of the retrieved information is not always guaranteed. The
|
||||
retrieval of irrelevant data can lead to misguided responses, and potentially
|
||||
causing the model to overlook its inherent knowledge, even when it possesses
|
||||
adequate information to address the query. Moreover, standard RALMs often
|
||||
struggle to assess whether they possess adequate knowledge, both intrinsic and
|
||||
retrieved, to provide an accurate answer. In situations where knowledge is
|
||||
lacking, these systems should ideally respond with "unknown" when the answer is
|
||||
unattainable. In response to these challenges, we introduces Chain-of-Noting
|
||||
(CoN), a novel approach aimed at improving the robustness of RALMs in facing
|
||||
noisy, irrelevant documents and in handling unknown scenarios. The core idea of
|
||||
CoN is to generate sequential reading notes for retrieved documents, enabling a
|
||||
thorough evaluation of their relevance to the given question and integrating
|
||||
this information to formulate the final answer. We employed ChatGPT to create
|
||||
training data for CoN, which was subsequently trained on an LLaMa-2 7B model.
|
||||
Our experiments across four open-domain QA benchmarks show that RALMs equipped
|
||||
with CoN significantly outperform standard RALMs. Notably, CoN achieves an
|
||||
average improvement of +7.9 in EM score given entirely noisy retrieved
|
||||
documents and +10.5 in rejection rates for real-time questions that fall
|
||||
outside the pre-training knowledge scope.
|
||||
|
||||
## Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection
|
||||
|
||||
- **arXiv id:** 2310.11511v1
|
||||
- **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection
|
||||
- **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al.
|
||||
- **Published Date:** 2023-10-17
|
||||
- **URL:** http://arxiv.org/abs/2310.11511v1
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
|
||||
|
||||
**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often
|
||||
produce responses containing factual inaccuracies due to their sole reliance on
|
||||
the parametric knowledge they encapsulate. Retrieval-Augmented Generation
|
||||
(RAG), an ad hoc approach that augments LMs with retrieval of relevant
|
||||
knowledge, decreases such issues. However, indiscriminately retrieving and
|
||||
incorporating a fixed number of retrieved passages, regardless of whether
|
||||
retrieval is necessary, or passages are relevant, diminishes LM versatility or
|
||||
can lead to unhelpful response generation. We introduce a new framework called
|
||||
Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM's
|
||||
quality and factuality through retrieval and self-reflection. Our framework
|
||||
trains a single arbitrary LM that adaptively retrieves passages on-demand, and
|
||||
generates and reflects on retrieved passages and its own generations using
|
||||
special tokens, called reflection tokens. Generating reflection tokens makes
|
||||
the LM controllable during the inference phase, enabling it to tailor its
|
||||
behavior to diverse task requirements. Experiments show that Self-RAG (7B and
|
||||
13B parameters) significantly outperforms state-of-the-art LLMs and
|
||||
retrieval-augmented models on a diverse set of tasks. Specifically, Self-RAG
|
||||
outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA,
|
||||
reasoning and fact verification tasks, and it shows significant gains in
|
||||
improving factuality and citation accuracy for long-form generations relative
|
||||
to these models.
|
||||
|
||||
## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models

- **arXiv id:** 2310.06117v2
- **Title:** Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models
- **Authors:** Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al.
- **Published Date:** 2023-10-09
- **URL:** http://arxiv.org/abs/2310.06117v2
- **LangChain:**

   - **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
   - **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)

**Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from instances containing specific details. Using the concepts and principles to guide reasoning, LLMs significantly improve their abilities in following a correct reasoning path towards the solution. We conduct experiments of Step-Back Prompting with PaLM-2L, GPT-4 and Llama2-70B models, and observe substantial performance gains on various challenging reasoning-intensive tasks including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%.

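
The step-back pattern needs no special machinery: one prompt abstracts the question, a second prompt answers with that abstraction in hand. The sketch below is a minimal LCEL-style illustration of that flow, not the linked template's actual code; the prompts and model choice are assumptions.

```python
# Minimal step-back prompting sketch (illustrative only; prompts and model are assumptions).
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0)

# 1. Abstract the concrete question into a higher-level "step-back" question.
step_back = (
    ChatPromptTemplate.from_template(
        "Rewrite the question as a more generic question about the underlying "
        "concept or principle.\n\nQuestion: {question}\nStep-back question:"
    )
    | llm
    | StrOutputParser()
)

# 2. Answer the original question, conditioning on the step-back reasoning.
answer_prompt = ChatPromptTemplate.from_template(
    "First reason about this general question: {step_back_question}\n"
    "Then answer the original question: {question}"
)

chain = (
    {"step_back_question": step_back, "question": lambda x: x["question"]}
    | answer_prompt
    | llm
    | StrOutputParser()
)

print(chain.invoke({"question": "What happens to the pressure of an ideal gas "
                                "if temperature doubles and volume is halved?"}))
```
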
## Llama 2: Open Foundation and Fine-Tuned Chat Models
|
||||
|
||||
- **arXiv id:** 2307.09288v2
|
||||
- **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models
|
||||
- **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al.
|
||||
- **Published Date:** 2023-07-18
|
||||
- **URL:** http://arxiv.org/abs/2307.09288v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
|
||||
|
||||
**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and
|
||||
fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70
|
||||
billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for
|
||||
dialogue use cases. Our models outperform open-source chat models on most
|
||||
benchmarks we tested, and based on our human evaluations for helpfulness and
|
||||
safety, may be a suitable substitute for closed-source models. We provide a
|
||||
detailed description of our approach to fine-tuning and safety improvements of
|
||||
Llama 2-Chat in order to enable the community to build on our work and
|
||||
contribute to the responsible development of LLMs.
|
||||
|
||||
## Query Rewriting for Retrieval-Augmented Large Language Models
|
||||
|
||||
- **arXiv id:** 2305.14283v3
|
||||
- **Title:** Query Rewriting for Retrieval-Augmented Large Language Models
|
||||
- **Authors:** Xinbei Ma, Yeyun Gong, Pengcheng He, et al.
|
||||
- **Published Date:** 2023-05-23
|
||||
- **URL:** http://arxiv.org/abs/2305.14283v3
|
||||
- **LangChain:**
|
||||
|
||||
- **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read)
|
||||
- **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
|
||||
|
||||
**Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the
|
||||
retrieve-then-read pipeline, making remarkable progress in knowledge-intensive
|
||||
tasks. This work introduces a new framework, Rewrite-Retrieve-Read instead of
|
||||
the previous retrieve-then-read for the retrieval-augmented LLMs from the
|
||||
perspective of the query rewriting. Unlike prior studies focusing on adapting
|
||||
either the retriever or the reader, our approach pays attention to the
|
||||
adaptation of the search query itself, for there is inevitably a gap between
|
||||
the input text and the needed knowledge in retrieval. We first prompt an LLM to
|
||||
generate the query, then use a web search engine to retrieve contexts.
|
||||
Furthermore, to better align the query to the frozen modules, we propose a
|
||||
trainable scheme for our pipeline. A small language model is adopted as a
|
||||
trainable rewriter to cater to the black-box LLM reader. The rewriter is
|
||||
trained using the feedback of the LLM reader by reinforcement learning.
|
||||
Evaluation is conducted on downstream tasks, open-domain QA and multiple-choice
|
||||
QA. Experiments results show consistent performance improvement, indicating
|
||||
that our framework is proven effective and scalable, and brings a new framework
|
||||
for retrieval-augmented LLM.
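
The rewrite-retrieve-read pipeline maps directly onto a three-stage chain: an LLM rewrites the raw query, a retriever runs the rewritten query, and the reader answers over the retrieved context. A minimal sketch follows (the toy corpus, prompts, and model are assumptions; the linked template is the reference implementation).

```python
# Rewrite-retrieve-read sketch (toy corpus, prompts and model are assumptions).
from operator import itemgetter

from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

retriever = FAISS.from_texts(
    ["LangChain integrates with many vector stores, such as FAISS and Chroma."],
    OpenAIEmbeddings(),
).as_retriever()
llm = ChatOpenAI(temperature=0)

# Stage 1: rewrite the raw user question into a cleaner search query.
rewrite = (
    ChatPromptTemplate.from_template(
        "Rewrite this question as a concise search query:\n{question}"
    )
    | llm
    | StrOutputParser()
)

# Stages 2-3: retrieve with the rewritten query, then read and answer.
answer_prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)
chain = (
    {
        "context": rewrite | retriever | (lambda docs: "\n".join(d.page_content for d in docs)),
        "question": itemgetter("question"),
    }
    | answer_prompt
    | llm
    | StrOutputParser()
)
print(chain.invoke({"question": "umm, which vector databases can langchain talk to??"}))
```
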
|
||||
|
||||
## Large Language Model Guided Tree-of-Thought
|
||||
|
||||
- **arXiv id:** 2305.08291v1
|
||||
- **Title:** Large Language Model Guided Tree-of-Thought
|
||||
- **Authors:** Jieyi Long
|
||||
- **Published Date:** 2023-05-15
|
||||
- **URL:** http://arxiv.org/abs/2305.08291v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
|
||||
- **Cookbook:** [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
|
||||
|
||||
**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel
|
||||
approach aimed at improving the problem-solving capabilities of auto-regressive
|
||||
large language models (LLMs). The ToT technique is inspired by the human mind's
|
||||
approach for solving complex reasoning tasks through trial and error. In this
|
||||
process, the human mind explores the solution space through a tree-like thought
|
||||
process, allowing for backtracking when necessary. To implement ToT as a
|
||||
software system, we augment an LLM with additional modules including a prompter
|
||||
agent, a checker module, a memory module, and a ToT controller. In order to
|
||||
solve a given problem, these modules engage in a multi-round conversation with
|
||||
the LLM. The memory module records the conversation and state history of the
|
||||
problem solving process, which allows the system to backtrack to the previous
|
||||
steps of the thought-process and explore other directions from there. To verify
|
||||
the effectiveness of the proposed technique, we implemented a ToT-based solver
|
||||
for the Sudoku Puzzle. Experimental results show that the ToT framework can
|
||||
significantly increase the success rate of Sudoku puzzle solving. Our
|
||||
implementation of the ToT-based Sudoku solver is available on GitHub:
|
||||
\url{https://github.com/jieyilong/tree-of-thought-puzzle-solver}.
|
||||
|
||||
## Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
|
||||
|
||||
- **arXiv id:** 2305.04091v3
|
||||
- **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models
|
||||
- **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al.
|
||||
- **Published Date:** 2023-05-06
|
||||
- **URL:** http://arxiv.org/abs/2305.04091v3
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
|
||||
|
||||
**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive
|
||||
performance in various NLP tasks. To tackle multi-step reasoning tasks,
|
||||
few-shot chain-of-thought (CoT) prompting includes a few manually crafted
|
||||
step-by-step reasoning demonstrations which enable LLMs to explicitly generate
|
||||
reasoning steps and improve their reasoning task accuracy. To eliminate the
|
||||
manual effort, Zero-shot-CoT concatenates the target problem statement with
|
||||
"Let's think step by step" as an input prompt to LLMs. Despite the success of
|
||||
Zero-shot-CoT, it still suffers from three pitfalls: calculation errors,
|
||||
missing-step errors, and semantic misunderstanding errors. To address the
|
||||
missing-step errors, we propose Plan-and-Solve (PS) Prompting. It consists of
|
||||
two components: first, devising a plan to divide the entire task into smaller
|
||||
subtasks, and then carrying out the subtasks according to the plan. To address
|
||||
the calculation errors and improve the quality of generated reasoning steps, we
|
||||
extend PS prompting with more detailed instructions and derive PS+ prompting.
|
||||
We evaluate our proposed prompting strategy on ten datasets across three
|
||||
reasoning problems. The experimental results over GPT-3 show that our proposed
|
||||
zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets
|
||||
by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought
|
||||
Prompting, and has comparable performance with 8-shot CoT prompting on the math
|
||||
reasoning problem. The code can be found at
|
||||
https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting.
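
The linked cookbook builds on LangChain's experimental plan-and-execute agent, which mirrors this plan-then-solve split: a planner drafts the subtasks and an executor works through them one by one. A hedged sketch, assuming an OpenAI chat model and a web-search tool:

```python
# Plan-then-execute sketch (tool and model choices are assumptions).
from langchain_community.tools import DuckDuckGoSearchRun  # requires duckduckgo-search
from langchain_experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0)
tools = [DuckDuckGoSearchRun()]

planner = load_chat_planner(llm)                          # devise the step-by-step plan
executor = load_agent_executor(llm, tools, verbose=True)  # carry out each planned step
agent = PlanAndExecute(planner=planner, executor=executor)

print(agent.run("How many years passed between the founding of the two oldest US universities?"))
```
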
|
||||
|
||||
## Visual Instruction Tuning
|
||||
|
||||
- **arXiv id:** 2304.08485v2
|
||||
- **Title:** Visual Instruction Tuning
|
||||
- **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al.
|
||||
- **Published Date:** 2023-04-17
|
||||
- **URL:** http://arxiv.org/abs/2304.08485v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb)
|
||||
|
||||
**Abstract:** Instruction tuning large language models (LLMs) using machine-generated
|
||||
instruction-following data has improved zero-shot capabilities on new tasks,
|
||||
but the idea is less explored in the multimodal field. In this paper, we
|
||||
present the first attempt to use language-only GPT-4 to generate multimodal
|
||||
language-image instruction-following data. By instruction tuning on such
|
||||
generated data, we introduce LLaVA: Large Language and Vision Assistant, an
|
||||
end-to-end trained large multimodal model that connects a vision encoder and
|
||||
LLM for general-purpose visual and language understanding.Our early experiments
|
||||
show that LLaVA demonstrates impressive multimodel chat abilities, sometimes
|
||||
exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and
|
||||
yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal
|
||||
instruction-following dataset. When fine-tuned on Science QA, the synergy of
|
||||
LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make
|
||||
GPT-4 generated visual instruction tuning data, our model and code base
|
||||
publicly available.
|
||||
|
||||
## Generative Agents: Interactive Simulacra of Human Behavior
|
||||
|
||||
- **arXiv id:** 2304.03442v2
|
||||
- **Title:** Generative Agents: Interactive Simulacra of Human Behavior
|
||||
- **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al.
|
||||
- **Published Date:** 2023-04-07
|
||||
- **URL:** http://arxiv.org/abs/2304.03442v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb)
|
||||
|
||||
**Abstract:** Believable proxies of human behavior can empower interactive applications
|
||||
ranging from immersive environments to rehearsal spaces for interpersonal
|
||||
communication to prototyping tools. In this paper, we introduce generative
|
||||
agents--computational software agents that simulate believable human behavior.
|
||||
Generative agents wake up, cook breakfast, and head to work; artists paint,
|
||||
while authors write; they form opinions, notice each other, and initiate
|
||||
conversations; they remember and reflect on days past as they plan the next
|
||||
day. To enable generative agents, we describe an architecture that extends a
|
||||
large language model to store a complete record of the agent's experiences
|
||||
using natural language, synthesize those memories over time into higher-level
|
||||
reflections, and retrieve them dynamically to plan behavior. We instantiate
|
||||
generative agents to populate an interactive sandbox environment inspired by
|
||||
The Sims, where end users can interact with a small town of twenty five agents
|
||||
using natural language. In an evaluation, these generative agents produce
|
||||
believable individual and emergent social behaviors: for example, starting with
|
||||
only a single user-specified notion that one agent wants to throw a Valentine's
|
||||
Day party, the agents autonomously spread invitations to the party over the
|
||||
next two days, make new acquaintances, ask each other out on dates to the
|
||||
party, and coordinate to show up for the party together at the right time. We
|
||||
demonstrate through ablation that the components of our agent
|
||||
architecture--observation, planning, and reflection--each contribute critically
|
||||
to the believability of agent behavior. By fusing large language models with
|
||||
computational, interactive agents, this work introduces architectural and
|
||||
interaction patterns for enabling believable simulations of human behavior.
|
||||
|
||||
## CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society
|
||||
|
||||
- **arXiv id:** 2303.17760v2
|
||||
- **Title:** CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society
|
||||
- **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al.
|
||||
- **Published Date:** 2023-03-31
|
||||
- **URL:** http://arxiv.org/abs/2303.17760v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Cookbook:** [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
|
||||
|
||||
**Abstract:** The rapid advancement of chat-based language models has led to remarkable
|
||||
progress in complex task-solving. However, their success heavily relies on
|
||||
human input to guide the conversation, which can be challenging and
|
||||
time-consuming. This paper explores the potential of building scalable
|
||||
techniques to facilitate autonomous cooperation among communicative agents, and
|
||||
provides insight into their "cognitive" processes. To address the challenges of
|
||||
achieving autonomous cooperation, we propose a novel communicative agent
|
||||
framework named role-playing. Our approach involves using inception prompting
|
||||
to guide chat agents toward task completion while maintaining consistency with
|
||||
human intentions. We showcase how role-playing can be used to generate
|
||||
conversational data for studying the behaviors and capabilities of a society of
|
||||
agents, providing a valuable resource for investigating conversational language
|
||||
models. In particular, we conduct comprehensive studies on
|
||||
instruction-following cooperation in multi-agent settings. Our contributions
|
||||
include introducing a novel communicative agent framework, offering a scalable
|
||||
approach for studying the cooperative behaviors and capabilities of multi-agent
|
||||
systems, and open-sourcing our library to support research on communicative
|
||||
agents and beyond: https://github.com/camel-ai/camel.
|
||||
|
||||
## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
|
||||
|
||||
- **arXiv id:** 2303.17580v4
|
||||
- **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
|
||||
- **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
|
||||
- **Published Date:** 2023-03-30
|
||||
- **URL:** http://arxiv.org/abs/2303.17580v4
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
|
||||
- **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
|
||||
|
||||
**Abstract:** Solving complicated AI tasks with different domains and modalities is a key
|
||||
step toward artificial general intelligence. While there are numerous AI models
|
||||
available for various domains and modalities, they cannot handle complicated AI
|
||||
tasks autonomously. Considering large language models (LLMs) have exhibited
|
||||
exceptional abilities in language understanding, generation, interaction, and
|
||||
reasoning, we advocate that LLMs could act as a controller to manage existing
|
||||
AI models to solve complicated AI tasks, with language serving as a generic
|
||||
interface to empower this. Based on this philosophy, we present HuggingGPT, an
|
||||
LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI
|
||||
models in machine learning communities (e.g., Hugging Face) to solve AI tasks.
|
||||
Specifically, we use ChatGPT to conduct task planning when receiving a user
|
||||
request, select models according to their function descriptions available in
|
||||
Hugging Face, execute each subtask with the selected AI model, and summarize
|
||||
the response according to the execution results. By leveraging the strong
|
||||
language capability of ChatGPT and abundant AI models in Hugging Face,
|
||||
HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different
|
||||
modalities and domains and achieve impressive results in language, vision,
|
||||
speech, and other challenging tasks, which paves a new way towards the
|
||||
realization of artificial general intelligence.
|
||||
|
||||
## GPT-4 Technical Report
|
||||
|
||||
- **arXiv id:** 2303.08774v6
|
||||
- **Title:** GPT-4 Technical Report
|
||||
- **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.
|
||||
- **Published Date:** 2023-03-15
|
||||
- **URL:** http://arxiv.org/abs/2303.08774v6
|
||||
- **LangChain:**
|
||||
|
||||
- **Documentation:** [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
|
||||
|
||||
**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can
|
||||
accept image and text inputs and produce text outputs. While less capable than
|
||||
humans in many real-world scenarios, GPT-4 exhibits human-level performance on
|
||||
various professional and academic benchmarks, including passing a simulated bar
|
||||
exam with a score around the top 10% of test takers. GPT-4 is a
|
||||
Transformer-based model pre-trained to predict the next token in a document.
|
||||
The post-training alignment process results in improved performance on measures
|
||||
of factuality and adherence to desired behavior. A core component of this
|
||||
project was developing infrastructure and optimization methods that behave
|
||||
predictably across a wide range of scales. This allowed us to accurately
|
||||
predict some aspects of GPT-4's performance based on models trained with no
|
||||
more than 1/1,000th the compute of GPT-4.
|
||||
|
||||
## A Watermark for Large Language Models
|
||||
|
||||
- **arXiv id:** 2301.10226v4
|
||||
- **Title:** A Watermark for Large Language Models
|
||||
- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
|
||||
- **Published Date:** 2023-01-24
|
||||
- **URL:** http://arxiv.org/abs/2301.10226v4
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
|
||||
**Abstract:** Potential harms of large language models can be mitigated by watermarking
|
||||
model output, i.e., embedding signals into generated text that are invisible to
|
||||
humans but algorithmically detectable from a short span of tokens. We propose a
|
||||
watermarking framework for proprietary language models. The watermark can be
|
||||
embedded with negligible impact on text quality, and can be detected using an
|
||||
efficient open-source algorithm without access to the language model API or
|
||||
parameters. The watermark works by selecting a randomized set of "green" tokens
|
||||
before a word is generated, and then softly promoting use of green tokens
|
||||
during sampling. We propose a statistical test for detecting the watermark with
|
||||
interpretable p-values, and derive an information-theoretic framework for
|
||||
analyzing the sensitivity of the watermark. We test the watermark using a
|
||||
multi-billion parameter model from the Open Pretrained Transformer (OPT)
|
||||
family, and discuss robustness and security.
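
The `HuggingFaceEndpoint` wrappers listed above expose a `watermark` flag that is forwarded to the text-generation-inference backend implementing this scheme. A minimal sketch (the repo id is an assumption, and whether the flag is honored depends on the serving backend):

```python
# Turning on TGI-style watermarking via the LangChain wrapper (repo id is an assumption).
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    max_new_tokens=256,
    watermark=True,  # softly promote the "green" token set during sampling
)
print(llm.invoke("Write two sentences about rivers."))
```
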
|
||||
|
||||
## Precise Zero-Shot Dense Retrieval without Relevance Labels

- **arXiv id:** 2212.10496v1
- **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels
- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- **Published Date:** 2022-12-20
- **URL:** http://arxiv.org/abs/2212.10496v1
- **LangChain:**

   - **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
   - **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
   - **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)

**Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense retrieval systems when no relevance label is available. In this paper, we recognize the difficulty of zero-shot learning and encoding relevance. Instead, we propose to pivot through Hypothetical Document Embeddings (HyDE). Given a query, HyDE first zero-shot instructs an instruction-following language model (e.g. InstructGPT) to generate a hypothetical document. The document captures relevance patterns but is unreal and may contain false details. Then, an unsupervised contrastively learned encoder (e.g. Contriever) encodes the document into an embedding vector. This vector identifies a neighborhood in the corpus embedding space, where similar real documents are retrieved based on vector similarity. This second step grounds the generated document to the actual corpus, with the encoder's dense bottleneck filtering out the incorrect details. Our experiments show that HyDE significantly outperforms the state-of-the-art unsupervised dense retriever Contriever and shows strong performance comparable to fine-tuned retrievers, across various tasks (e.g. web search, QA, fact verification) and languages (e.g. sw, ko, ja).

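
`HypotheticalDocumentEmbedder` packages exactly this trick: it prompts an LLM for a hypothetical answer document and embeds that text in place of the raw query. A minimal sketch of the usual wiring (the toy corpus is an assumption; `"web_search"` selects one of the built-in HyDE prompts):

```python
# HyDE sketch: embed a hypothetical answer, then search with it (corpus is an assumption).
from langchain.chains import HypotheticalDocumentEmbedder
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

base_embeddings = OpenAIEmbeddings()
llm = ChatOpenAI(temperature=0)

# Use one of the built-in HyDE prompts ("web_search") to write the hypothetical document.
hyde = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")

# Index real documents with ordinary embeddings, but embed the *query* via HyDE.
db = FAISS.from_texts(
    ["Paris is the capital of France.", "The Eiffel Tower was completed in 1889."],
    base_embeddings,
)
query_vector = hyde.embed_query("When was the Eiffel Tower finished?")
print(db.similarity_search_by_vector(query_vector, k=1)[0].page_content)
```
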
## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
|
||||
|
||||
- **arXiv id:** 2212.07425v3
|
||||
- **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
|
||||
- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
|
||||
- **Published Date:** 2022-12-12
|
||||
- **URL:** http://arxiv.org/abs/2212.07425v3
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
|
||||
|
||||
**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been
|
||||
amplified in the Internet era. Given the volume of data and the subtlety of
|
||||
identifying violations of argumentation norms, supporting information analytics
|
||||
tasks, like content moderation, with trustworthy methods that can identify
|
||||
logical fallacies is essential. In this paper, we formalize prior theoretical
|
||||
work on logical fallacies into a comprehensive three-stage evaluation framework
|
||||
of detection, coarse-grained, and fine-grained classification. We adapt
|
||||
existing evaluation datasets for each stage of the evaluation. We employ three
|
||||
families of robust and explainable methods based on prototype reasoning,
|
||||
instance-based reasoning, and knowledge injection. The methods combine language
|
||||
models with background knowledge and explainable mechanisms. Moreover, we
|
||||
address data sparsity with strategies for data augmentation and curriculum
|
||||
learning. Our three-stage framework natively consolidates prior datasets and
|
||||
methods from existing tasks, like propaganda detection, serving as an
|
||||
overarching evaluation testbed. We extensively evaluate these methods on our
|
||||
datasets, focusing on their robustness and explainability. Our results provide
|
||||
insight into the strengths and weaknesses of the methods on different
|
||||
components and fallacy classes, indicating that fallacy identification is a
|
||||
challenging task that may require specialized forms of reasoning to capture
|
||||
various classes. We share our open-source code and data on GitHub to support
|
||||
further work on logical fallacy identification.
|
||||
|
||||
## Complementary Explanations for Effective In-Context Learning

- **arXiv id:** 2211.13892v2
- **Title:** Complementary Explanations for Effective In-Context Learning
- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- **Published Date:** 2022-11-25
- **URL:** http://arxiv.org/abs/2211.13892v2
- **LangChain:**

   - **API Reference:** [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)

**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in learning from explanations in prompts, but there has been limited understanding of exactly how these explanations function or why they are effective. This work aims to better understand the mechanisms by which explanations are used for in-context learning. We first study the impact of two different factors on the performance of prompts with explanations: the computation trace (the way the solution is decomposed) and the natural language used to express the prompt. By perturbing explanations on three controlled tasks, we show that both factors contribute to the effectiveness of explanations. We further study how to form maximally effective sets of explanations for solving a given test query. We find that LLMs can benefit from the complementarity of the explanation set: diverse reasoning skills shown by different exemplars can lead to better performance. Therefore, we propose a maximal marginal relevance-based exemplar selection approach for constructing exemplar sets that are both relevant as well as complementary, which successfully improves the in-context learning performance across three real-world tasks on multiple LLMs.

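
`MaxMarginalRelevanceExampleSelector` applies this idea when picking few-shot exemplars: candidates are scored for similarity to the input but penalized for redundancy with already-chosen examples. A minimal sketch (the example pool is an assumption):

```python
# Selecting few-shot exemplars that are relevant *and* diverse (example pool is an assumption).
from langchain_community.vectorstores import FAISS
from langchain_core.example_selectors import MaxMarginalRelevanceExampleSelector
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate
from langchain_openai import OpenAIEmbeddings

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "sunny", "output": "gloomy"},
    {"input": "energetic", "output": "lethargic"},
]

selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples, OpenAIEmbeddings(), FAISS, k=2
)

prompt = FewShotPromptTemplate(
    example_selector=selector,
    example_prompt=PromptTemplate.from_template("Input: {input}\nOutput: {output}"),
    prefix="Give the antonym of every input.",
    suffix="Input: {adjective}\nOutput:",
    input_variables=["adjective"],
)
print(prompt.format(adjective="windy"))
```
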
## PAL: Program-aided Language Models
|
||||
|
||||
- **arXiv id:** 2211.10435v2
|
||||
- **Title:** PAL: Program-aided Language Models
|
||||
- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
|
||||
- **Published Date:** 2022-11-18
|
||||
- **URL:** http://arxiv.org/abs/2211.10435v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
|
||||
- **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
|
||||
|
||||
**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
|
||||
to perform arithmetic and symbolic reasoning tasks, when provided with a few
|
||||
examples at test time ("few-shot prompting"). Much of this success can be
|
||||
attributed to prompting methods such as "chain-of-thought", which employ LLMs
|
||||
for both understanding the problem description by decomposing it into steps, as
|
||||
well as solving each step of the problem. While LLMs seem to be adept at this
|
||||
sort of step-by-step decomposition, LLMs often make logical and arithmetic
|
||||
mistakes in the solution part, even when the problem is decomposed correctly.
|
||||
In this paper, we present Program-Aided Language models (PAL): a novel approach
|
||||
that uses the LLM to read natural language problems and generate programs as
|
||||
the intermediate reasoning steps, but offloads the solution step to a runtime
|
||||
such as a Python interpreter. With PAL, decomposing the natural language
|
||||
problem into runnable steps remains the only learning task for the LLM, while
|
||||
solving is delegated to the interpreter. We demonstrate this synergy between a
|
||||
neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and
|
||||
algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all
|
||||
these natural language reasoning tasks, generating code using an LLM and
|
||||
reasoning using a Python interpreter leads to more accurate results than much
|
||||
larger models. For example, PAL using Codex achieves state-of-the-art few-shot
|
||||
accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B
|
||||
which uses chain-of-thought by absolute 15% top-1. Our code and data are
|
||||
publicly available at http://reasonwithpal.com/ .
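
A minimal sketch of the `PALChain` referenced above, which has the LLM write a Python program as its reasoning and delegates execution to an interpreter: the OpenAI model choice is an illustrative assumption, and recent `langchain_experimental` releases also require opting in with `allow_dangerous_code=True` because the chain executes model-generated code.

```python
# A sketch: let the LLM write the reasoning as Python, run it in an interpreter.
from langchain_experimental.pal_chain import PALChain
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)  # illustrative model choice

# allow_dangerous_code is required on recent langchain_experimental releases
# because the chain executes model-generated Python; omit it on older versions.
pal_chain = PALChain.from_math_prompt(llm, verbose=True, allow_dangerous_code=True)

question = (
    "Jan has three times the number of pets as Marcia. "
    "Marcia has two more pets than Cindy. "
    "If Cindy has four pets, how many total pets do the three have?"
)
print(pal_chain.invoke({"question": question}))
```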
|
||||
|
||||
## ReAct: Synergizing Reasoning and Acting in Language Models
|
||||
|
||||
- **arXiv id:** 2210.03629v3
|
||||
- **Title:** ReAct: Synergizing Reasoning and Acting in Language Models
|
||||
- **Authors:** Shunyu Yao, Jeffrey Zhao, Dian Yu, et al.
|
||||
- **Published Date:** 2022-10-06
|
||||
- **URL:** http://arxiv.org/abs/2210.03629v3
|
||||
- **LangChain:**
|
||||
|
||||
- **Documentation:** [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/integrations/chat/huggingface](https://python.langchain.com/docs/integrations/chat/huggingface), [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping)
|
||||
- **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
|
||||
|
||||
**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities
|
||||
across tasks in language understanding and interactive decision making, their
|
||||
abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g.
|
||||
action plan generation) have primarily been studied as separate topics. In this
|
||||
paper, we explore the use of LLMs to generate both reasoning traces and
|
||||
task-specific actions in an interleaved manner, allowing for greater synergy
|
||||
between the two: reasoning traces help the model induce, track, and update
|
||||
action plans as well as handle exceptions, while actions allow it to interface
|
||||
with external sources, such as knowledge bases or environments, to gather
|
||||
additional information. We apply our approach, named ReAct, to a diverse set of
|
||||
language and decision making tasks and demonstrate its effectiveness over
|
||||
state-of-the-art baselines, as well as improved human interpretability and
|
||||
trustworthiness over methods without reasoning or acting components.
|
||||
Concretely, on question answering (HotpotQA) and fact verification (Fever),
|
||||
ReAct overcomes issues of hallucination and error propagation prevalent in
|
||||
chain-of-thought reasoning by interacting with a simple Wikipedia API, and
|
||||
generates human-like task-solving trajectories that are more interpretable than
|
||||
baselines without reasoning traces. On two interactive decision making
|
||||
benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and
|
||||
reinforcement learning methods by an absolute success rate of 34% and 10%
|
||||
respectively, while being prompted with only one or two in-context examples.
|
||||
Project site with code: https://react-lm.github.io
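
A minimal sketch of a ReAct-style agent built with the `create_react_agent` helper referenced above; the Tavily search tool, the hub prompt, and the OpenAI model are illustrative assumptions.

```python
# A sketch: interleaved Thought / Action / Observation loop with a search tool.
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI

tools = [TavilySearchResults(max_results=3)]             # illustrative tool
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)   # illustrative model

prompt = hub.pull("hwchase17/react")  # a standard ReAct prompt from the hub

agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

executor.invoke({"input": "Who won the 2023 Nobel Prize in Physics?"})
```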
|
||||
|
||||
## Deep Lake: a Lakehouse for Deep Learning
|
||||
|
||||
- **arXiv id:** 2209.10785v2
|
||||
- **Title:** Deep Lake: a Lakehouse for Deep Learning
|
||||
- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
|
||||
- **Published Date:** 2022-09-22
|
||||
- **URL:** http://arxiv.org/abs/2209.10785v2
|
||||
- **LangChain:**
|
||||
|
||||
- **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
|
||||
|
||||
**Abstract:** Traditional data lakes provide critical data infrastructure for analytical
|
||||
workloads by enabling time travel, running SQL queries, ingesting data with
|
||||
ACID transactions, and visualizing petabyte-scale datasets on cloud storage.
|
||||
They allow organizations to break down data silos, unlock data-driven
|
||||
decision-making, improve operational efficiency, and reduce costs. However, as
|
||||
deep learning usage increases, traditional data lakes are not well-designed for
|
||||
applications such as natural language processing (NLP), audio processing,
|
||||
computer vision, and applications involving non-tabular datasets. This paper
|
||||
presents Deep Lake, an open-source lakehouse for deep learning applications
|
||||
developed at Activeloop. Deep Lake maintains the benefits of a vanilla data
|
||||
lake with one key difference: it stores complex data, such as images, videos,
|
||||
annotations, as well as tabular data, in the form of tensors and rapidly
|
||||
streams the data over the network to (a) Tensor Query Language, (b) in-browser
|
||||
visualization engine, or (c) deep learning frameworks without sacrificing GPU
|
||||
utilization. Datasets stored in Deep Lake can be accessed from PyTorch,
|
||||
TensorFlow, JAX, and integrate with numerous MLOps tools.
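
A minimal sketch of using Deep Lake as a LangChain vector store via the integration referenced above; the local dataset path and `OpenAIEmbeddings` are illustrative assumptions.

```python
# A sketch: Deep Lake as a LangChain vector store.
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings

db = DeepLake(
    dataset_path="./my_deeplake",   # illustrative; could be hub://, s3://, gcs://, ...
    embedding=OpenAIEmbeddings(),   # illustrative embedding model
)
db.add_texts(["Deep Lake streams tensor data to training and retrieval workloads."])
docs = db.similarity_search("What does Deep Lake stream?", k=1)
print(docs[0].page_content)
```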
|
||||
|
||||
## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
|
||||
|
||||
- **arXiv id:** 2205.12654v1
|
||||
- **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
|
||||
- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
|
||||
- **Published Date:** 2022-05-25
|
||||
- **URL:** http://arxiv.org/abs/2205.12654v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
|
||||
|
||||
**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
|
||||
languages is challenging, in particular to cover the long tail of low-resource
|
||||
languages. A promising approach has been to train one-for-all multilingual
|
||||
models capable of cross-lingual transfer, but these models often suffer from
|
||||
insufficient capacity and interference between unrelated languages. Instead, we
|
||||
move away from this approach and focus on training multiple language (family)
|
||||
specific representations, but most prominently enable all languages to still be
|
||||
encoded in the same representational space. To achieve this, we focus on
|
||||
teacher-student training, allowing all encoders to be mutually compatible for
|
||||
bitext mining, and enabling fast learning of new languages. We introduce a new
|
||||
teacher-student training scheme which combines supervised and self-supervised
|
||||
training, allowing encoders to take advantage of monolingual training data,
|
||||
which is valuable in the low-resource setting.
|
||||
Our approach significantly outperforms the original LASER encoder. We study
|
||||
very low-resource languages and handle 50 African languages, many of which are
|
||||
not covered by any other model. For these languages, we train sentence
|
||||
encoders, mine bitexts, and validate the bitexts by training NMT systems.
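
A minimal sketch of the `LaserEmbeddings` wrapper referenced above; the `lang` code (FLORES-200 style) is an illustrative assumption, and the wrapper expects the `laser_encoders` package to be installed.

```python
# A sketch: LASER sentence embeddings for cross-lingual similarity.
from langchain_community.embeddings.laser import LaserEmbeddings

embedder = LaserEmbeddings(lang="eng_Latn")  # illustrative FLORES-200 language code
doc_vectors = embedder.embed_documents(
    ["LASER maps sentences from many languages into one shared space."]
)
query_vector = embedder.embed_query("multilingual sentence embeddings")
```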
|
||||
|
||||
## Evaluating the Text-to-SQL Capabilities of Large Language Models
|
||||
|
||||
- **arXiv id:** 2204.00498v1
|
||||
- **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models
|
||||
- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
|
||||
- **Published Date:** 2022-03-15
|
||||
- **URL:** http://arxiv.org/abs/2204.00498v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase)
|
||||
|
||||
**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
|
||||
language model. We find that, without any finetuning, Codex is a strong
|
||||
baseline on the Spider benchmark; we also analyze the failure modes of Codex in
|
||||
this setting. Furthermore, we demonstrate on the GeoQuery and Scholar
|
||||
benchmarks that a small number of in-domain examples provided in the prompt
|
||||
enables Codex to perform better than state-of-the-art models finetuned on such
|
||||
few-shot examples.
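
A minimal sketch of prompting a model to write SQL with the `SQLDatabase` utility referenced above, using the `create_sql_query_chain` helper; the Chinook SQLite file and the OpenAI model are illustrative assumptions.

```python
# A sketch: generate SQL from a natural-language question, then run it.
from langchain_community.utilities import SQLDatabase
from langchain.chains import create_sql_query_chain
from langchain_openai import ChatOpenAI

db = SQLDatabase.from_uri("sqlite:///Chinook.db")         # illustrative database
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)    # illustrative model

chain = create_sql_query_chain(llm, db)  # prompts the model with the schema
sql = chain.invoke({"question": "How many employees are there?"})
print(sql)
print(db.run(sql))
```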
|
||||
|
||||
## Locally Typical Sampling
|
||||
|
||||
- **arXiv id:** 2202.00666v5
|
||||
- **Title:** Locally Typical Sampling
|
||||
- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
|
||||
- **Published Date:** 2022-02-01
|
||||
- **URL:** http://arxiv.org/abs/2202.00666v5
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
|
||||
**Abstract:** Today's probabilistic language generators fall short when it comes to
|
||||
producing coherent and fluent text despite the fact that the underlying models
|
||||
perform well under standard metrics, e.g., perplexity. This discrepancy has
|
||||
puzzled the language generation community for the last few years. In this work,
|
||||
we posit that the abstraction of natural language generation as a discrete
|
||||
stochastic process--which allows for an information-theoretic analysis--can
|
||||
provide new insights into the behavior of probabilistic language generators,
|
||||
e.g., why high-probability texts can be dull or repetitive. Humans use language
|
||||
as a means of communicating information, aiming to do so in a simultaneously
|
||||
efficient and error-minimizing manner; in fact, psycholinguistics research
|
||||
suggests humans choose each word in a string with this subconscious goal in
|
||||
mind. We formally define the set of strings that meet this criterion: those for
|
||||
which each word has an information content close to the expected information
|
||||
content, i.e., the conditional entropy of our model. We then propose a simple
|
||||
and efficient procedure for enforcing this criterion when generating from
|
||||
probabilistic models, which we call locally typical sampling. Automatic and
|
||||
human evaluations show that, in comparison to nucleus and top-k sampling,
|
||||
locally typical sampling offers competitive performance (in both abstractive
|
||||
summarization and story generation) in terms of quality while consistently
|
||||
reducing degenerate repetitions.
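
Locally typical sampling is exposed by Hugging Face text-generation backends as a `typical_p` parameter, which the `HuggingFaceEndpoint` wrappers referenced above pass through. A minimal sketch follows; the repo id and parameter values are illustrative assumptions.

```python
# A sketch: forwarding typical_p to a Hugging Face text-generation backend.
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",  # illustrative model
    do_sample=True,
    typical_p=0.95,        # locally typical sampling mass
    max_new_tokens=128,
)
print(llm.invoke("Tell me a short story about a lighthouse keeper."))
```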
|
||||
|
||||
## Learning Transferable Visual Models From Natural Language Supervision
|
||||
|
||||
- **arXiv id:** 2103.00020v1
|
||||
- **Title:** Learning Transferable Visual Models From Natural Language Supervision
|
||||
- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
|
||||
- **Published Date:** 2021-02-26
|
||||
- **URL:** http://arxiv.org/abs/2103.00020v1
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
|
||||
|
||||
**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set
|
||||
of predetermined object categories. This restricted form of supervision limits
|
||||
their generality and usability since additional labeled data is needed to
|
||||
specify any other visual concept. Learning directly from raw text about images
|
||||
is a promising alternative which leverages a much broader source of
|
||||
supervision. We demonstrate that the simple pre-training task of predicting
|
||||
which caption goes with which image is an efficient and scalable way to learn
|
||||
SOTA image representations from scratch on a dataset of 400 million (image,
|
||||
text) pairs collected from the internet. After pre-training, natural language
|
||||
is used to reference learned visual concepts (or describe new ones) enabling
|
||||
zero-shot transfer of the model to downstream tasks. We study the performance
|
||||
of this approach by benchmarking on over 30 different existing computer vision
|
||||
datasets, spanning tasks such as OCR, action recognition in videos,
|
||||
geo-localization, and many types of fine-grained object classification. The
|
||||
model transfers non-trivially to most tasks and is often competitive with a
|
||||
fully supervised baseline without the need for any dataset specific training.
|
||||
For instance, we match the accuracy of the original ResNet-50 on ImageNet
|
||||
zero-shot without needing to use any of the 1.28 million training examples it
|
||||
was trained on. We release our code and pre-trained model weights at
|
||||
https://github.com/OpenAI/CLIP.
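
A minimal sketch of the `langchain_experimental.open_clip` integration referenced above, embedding text and images into the shared CLIP space; the model and checkpoint names and the local image path are illustrative assumptions.

```python
# A sketch: text and image embeddings in the shared CLIP space.
from langchain_experimental.open_clip import OpenCLIPEmbeddings

clip = OpenCLIPEmbeddings(
    model_name="ViT-B-32",            # illustrative open_clip model
    checkpoint="laion2b_s34b_b79k",   # illustrative pretrained checkpoint
)
text_vectors = clip.embed_documents(["a photo of a dog", "a photo of a cat"])
image_vectors = clip.embed_image(["./dog.jpg"])  # illustrative local image path
```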
|
||||
|
||||
## CTRL: A Conditional Transformer Language Model for Controllable Generation
|
||||
|
||||
- **arXiv id:** 1909.05858v2
|
||||
- **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation
|
||||
- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
|
||||
- **Published Date:** 2019-09-11
|
||||
- **URL:** http://arxiv.org/abs/1909.05858v2
|
||||
- **LangChain:**
|
||||
|
||||
- **API Reference:** [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference)
|
||||
|
||||
**Abstract:** Large-scale language models show promising text generation capabilities, but
|
||||
users cannot easily control particular aspects of the generated text. We
|
||||
release CTRL, a 1.63 billion-parameter conditional transformer language model,
|
||||
trained to condition on control codes that govern style, content, and
|
||||
task-specific behavior. Control codes were derived from structure that
|
||||
naturally co-occurs with raw text, preserving the advantages of unsupervised
|
||||
learning while providing more explicit control over text generation. These
|
||||
codes also allow CTRL to predict which parts of the training data are most
|
||||
likely given a sequence. This provides a potential method for analyzing large
|
||||
amounts of data via model-based source attribution. We have released multiple
|
||||
full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.
|
||||
|
||||
## Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
|
||||
|
||||
- **arXiv id:** 1908.10084v1
|
||||
- **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
|
||||
- **Authors:** Nils Reimers, Iryna Gurevych
|
||||
- **Published Date:** 2019-08-27
|
||||
- **URL:** http://arxiv.org/abs/1908.10084v1
|
||||
- **LangChain:**
|
||||
|
||||
- **Documentation:** [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
|
||||
|
||||
**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new
|
||||
state-of-the-art performance on sentence-pair regression tasks like semantic
|
||||
textual similarity (STS). However, it requires that both sentences are fed into
|
||||
the network, which causes a massive computational overhead: Finding the most
|
||||
similar pair in a collection of 10,000 sentences requires about 50 million
|
||||
inference computations (~65 hours) with BERT. The construction of BERT makes it
|
||||
unsuitable for semantic similarity search as well as for unsupervised tasks
|
||||
like clustering.
|
||||
In this publication, we present Sentence-BERT (SBERT), a modification of the
|
||||
pretrained BERT network that uses siamese and triplet network structures to
|
||||
derive semantically meaningful sentence embeddings that can be compared using
|
||||
cosine-similarity. This reduces the effort for finding the most similar pair
|
||||
from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while
|
||||
maintaining the accuracy from BERT.
|
||||
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning
|
||||
tasks, where it outperforms other state-of-the-art sentence embeddings methods.
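
In LangChain, SBERT models are typically consumed through the sentence-transformers integration referenced above. A minimal sketch follows; the specific model name is an illustrative assumption.

```python
# A sketch: SBERT-style sentence embeddings through sentence-transformers.
from langchain_community.embeddings import HuggingFaceEmbeddings

embedder = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"  # illustrative model
)
doc_vectors = embedder.embed_documents(
    [
        "SBERT derives sentence embeddings comparable with cosine similarity.",
        "BERT cross-encoders are too slow for large-scale similarity search.",
    ]
)
query_vector = embedder.embed_query("fast semantic similarity search")
```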
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)
|
||||
### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
|
||||
### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
|
||||
### [by BobLin (Chinese language)](https://www.youtube.com/playlist?list=PLbd7ntv6PxC3QMFQvtWfk55p-Op_syO1C)
|
||||
|
||||
## Courses
|
||||
|
||||
@@ -45,7 +46,6 @@
|
||||
- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffrath](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
|
||||
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
|
||||
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
|
||||
- [Dive into Langchain (Chinese language)](https://langchain.boblin.app/)
|
||||
|
||||
---------------------
|
||||
|
||||
|
||||
|
||||
@@ -1,137 +1,63 @@
|
||||
# YouTube videos
|
||||
|
||||
⛓ icon marks a new addition [last update 2023-09-21]
|
||||
[Updated 2024-05-16]
|
||||
|
||||
### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)
|
||||
|
||||
### Introduction to LangChain with Harrison Chase, creator of LangChain
|
||||
- [Building the Future with LLMs, `LangChain`, & `Pinecone`](https://youtu.be/nMniwlGyX-c) by [Pinecone](https://www.youtube.com/@pinecone-io)
|
||||
- [LangChain and Weaviate with Harrison Chase and Bob van Luijt - Weaviate Podcast #36](https://youtu.be/lhby7Ql7hbk) by [Weaviate • Vector Database](https://www.youtube.com/@Weaviate)
|
||||
- [LangChain Demo + Q&A with Harrison Chase](https://youtu.be/zaYTXQFR0_s?t=788) by [Full Stack Deep Learning](https://www.youtube.com/@The_Full_Stack)
|
||||
- [LangChain Agents: Build Personal Assistants For Your Data (Q&A with Harrison Chase and Mayo Oshin)](https://youtu.be/gVkF8cwfBLI) by [Chat with data](https://www.youtube.com/@chatwithdata)
|
||||
### [Tutorials on YouTube](/docs/additional_resources/tutorials/#tutorials)
|
||||
|
||||
## Videos (sorted by views)
|
||||
|
||||
- [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain OpenAI API)](https://youtu.be/9AXP7tCI9PI) by [TechLead](https://www.youtube.com/@TechLead)
|
||||
- [First look - `ChatGPT` + `WolframAlpha` (`GPT-3.5` and Wolfram|Alpha via LangChain by James Weaver)](https://youtu.be/wYGbY811oMo) by [Dr Alan D. Thompson](https://www.youtube.com/@DrAlanDThompson)
|
||||
- [LangChain explained - The hottest new Python framework](https://youtu.be/RoR4XJw8wIc) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
|
||||
- [Chatbot with INFINITE MEMORY using `OpenAI` & `Pinecone` - `GPT-3`, `Embeddings`, `ADA`, `Vector DB`, `Semantic`](https://youtu.be/2xNzB7xq8nk) by [David Shapiro ~ AI](https://www.youtube.com/@DaveShap)
|
||||
- [LangChain for LLMs is... basically just an Ansible playbook](https://youtu.be/X51N9C-OhlE) by [David Shapiro ~ AI](https://www.youtube.com/@DaveShap)
|
||||
- [Build your own LLM Apps with LangChain & `GPT-Index`](https://youtu.be/-75p09zFUJY) by [1littlecoder](https://www.youtube.com/@1littlecoder)
|
||||
- [`BabyAGI` - New System of Autonomous AI Agents with LangChain](https://youtu.be/lg3kJvf1kXo) by [1littlecoder](https://www.youtube.com/@1littlecoder)
|
||||
- [Run `BabyAGI` with Langchain Agents (with Python Code)](https://youtu.be/WosPGHPObx8) by [1littlecoder](https://www.youtube.com/@1littlecoder)
|
||||
- [How to Use Langchain With `Zapier` | Write and Send Email with GPT-3 | OpenAI API Tutorial](https://youtu.be/p9v2-xEa9A0) by [StarMorph AI](https://www.youtube.com/@starmorph)
|
||||
- [Use Your Locally Stored Files To Get Response From GPT - `OpenAI` | Langchain | Python](https://youtu.be/NC1Ni9KS-rk) by [Shweta Lodha](https://www.youtube.com/@shweta-lodha)
|
||||
- [`Langchain JS` | How to Use GPT-3, GPT-4 to Reference your own Data | `OpenAI Embeddings` Intro](https://youtu.be/veV2I-NEjaM) by [StarMorph AI](https://www.youtube.com/@starmorph)
|
||||
- [The easiest way to work with large language models | Learn LangChain in 10min](https://youtu.be/kmbS6FDQh7c) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
|
||||
- [4 Autonomous AI Agents: “Westworld” simulation `BabyAGI`, `AutoGPT`, `Camel`, `LangChain`](https://youtu.be/yWbnH6inT_U) by [Sophia Yang](https://www.youtube.com/@SophiaYangDS)
|
||||
- [AI CAN SEARCH THE INTERNET? Langchain Agents + OpenAI ChatGPT](https://youtu.be/J-GL0htqda8) by [tylerwhatsgood](https://www.youtube.com/@tylerwhatsgood)
|
||||
- [Query Your Data with GPT-4 | Embeddings, Vector Databases | Langchain JS Knowledgebase](https://youtu.be/jRnUPUTkZmU) by [StarMorph AI](https://www.youtube.com/@starmorph)
|
||||
- [`Weaviate` + LangChain for LLM apps presented by Erika Cardenas](https://youtu.be/7AGj4Td5Lgw) by [`Weaviate` • Vector Database](https://www.youtube.com/@Weaviate)
|
||||
- [Langchain Overview — How to Use Langchain & `ChatGPT`](https://youtu.be/oYVYIq0lOtI) by [Python In Office](https://www.youtube.com/@pythoninoffice6568)
|
||||
- [Langchain Overview - How to Use Langchain & `ChatGPT`](https://youtu.be/oYVYIq0lOtI) by [Python In Office](https://www.youtube.com/@pythoninoffice6568)
|
||||
- [LangChain Tutorials](https://www.youtube.com/watch?v=FuqdVNB_8c0&list=PL9V0lbeJ69brU-ojMpU1Y7Ic58Tap0Cw6) by [Edrick](https://www.youtube.com/@edrickdch):
|
||||
- [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)
|
||||
- [LangChain 101: The Complete Beginner's Guide](https://youtu.be/P3MAbZ2eMUI)
|
||||
- [Custom langchain Agent & Tools with memory. Turn any `Python function` into langchain tool with Gpt 3](https://youtu.be/NIG8lXk0ULg) by [echohive](https://www.youtube.com/@echohive)
|
||||
- [Building AI LLM Apps with LangChain (and more?) - LIVE STREAM](https://www.youtube.com/live/M-2Cj_2fzWI?feature=share) by [Nicholas Renotte](https://www.youtube.com/@NicholasRenotte)
|
||||
- [`ChatGPT` with any `YouTube` video using langchain and `chromadb`](https://youtu.be/TQZfB2bzVwU) by [echohive](https://www.youtube.com/@echohive)
|
||||
- [How to Talk to a `PDF` using LangChain and `ChatGPT`](https://youtu.be/v2i1YDtrIwk) by [Automata Learning Lab](https://www.youtube.com/@automatalearninglab)
|
||||
- [Langchain Document Loaders Part 1: Unstructured Files](https://youtu.be/O5C0wfsen98) by [Merk](https://www.youtube.com/@heymichaeldaigler)
|
||||
- [LangChain - Prompt Templates (what all the best prompt engineers use)](https://youtu.be/1aRu8b0XNOQ) by [Nick Daigler](https://www.youtube.com/@nickdaigler)
|
||||
- [LangChain. Crear aplicaciones Python impulsadas por GPT](https://youtu.be/DkW_rDndts8) by [Jesús Conde](https://www.youtube.com/@0utKast)
|
||||
- [Easiest Way to Use GPT In Your Products | LangChain Basics Tutorial](https://youtu.be/fLy0VenZyGc) by [Rachel Woods](https://www.youtube.com/@therachelwoods)
|
||||
- [`BabyAGI` + `GPT-4` Langchain Agent with Internet Access](https://youtu.be/wx1z_hs5P6E) by [tylerwhatsgood](https://www.youtube.com/@tylerwhatsgood)
|
||||
- [Learning LLM Agents. How does it actually work? LangChain, AutoGPT & OpenAI](https://youtu.be/mb_YAABSplk) by [Arnoldas Kemeklis](https://www.youtube.com/@processusAI)
|
||||
- [Get Started with LangChain in `Node.js`](https://youtu.be/Wxx1KUWJFv4) by [Developers Digest](https://www.youtube.com/@DevelopersDigest)
|
||||
- [LangChain + `OpenAI` tutorial: Building a Q&A system w/ own text data](https://youtu.be/DYOU_Z0hAwo) by [Samuel Chan](https://www.youtube.com/@SamuelChan)
|
||||
- [Langchain + `Zapier` Agent](https://youtu.be/yribLAb-pxA) by [Merk](https://www.youtube.com/@heymichaeldaigler)
|
||||
- [Connecting the Internet with `ChatGPT` (LLMs) using Langchain And Answers Your Questions](https://youtu.be/9Y0TBC63yZg) by [Kamalraj M M](https://www.youtube.com/@insightbuilder)
|
||||
- [Build More Powerful LLM Applications for Business’s with LangChain (Beginners Guide)](https://youtu.be/sp3-WLKEcBg) by [No Code Blackbox](https://www.youtube.com/@nocodeblackbox)
|
||||
- [LangFlow LLM Agent Demo for 🦜🔗LangChain](https://youtu.be/zJxDHaWt-6o) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA)
|
||||
- [Chatbot Factory: Streamline Python Chatbot Creation with LLMs and Langchain](https://youtu.be/eYer3uzrcuM) by [Finxter](https://www.youtube.com/@CobusGreylingZA)
|
||||
- [LangChain Tutorial - ChatGPT mit eigenen Daten](https://youtu.be/0XDLyY90E2c) by [Coding Crashkurse](https://www.youtube.com/@codingcrashkurse6429)
|
||||
- [Chat with a `CSV` | LangChain Agents Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [GoDataProf](https://www.youtube.com/@godataprof)
|
||||
- [Introdução ao Langchain - #Cortes - Live DataHackers](https://youtu.be/fw8y5VRei5Y) by [Prof. João Gabriel Lima](https://www.youtube.com/@profjoaogabriellima)
|
||||
- [LangChain: Level up `ChatGPT` !? | LangChain Tutorial Part 1](https://youtu.be/vxUGx8aZpDE) by [Code Affinity](https://www.youtube.com/@codeaffinitydev)
|
||||
- [KI schreibt krasses Youtube Skript 😲😳 | LangChain Tutorial Deutsch](https://youtu.be/QpTiXyK1jus) by [SimpleKI](https://www.youtube.com/@simpleki)
|
||||
- [Chat with Audio: Langchain, `Chroma DB`, OpenAI, and `Assembly AI`](https://youtu.be/Kjy7cx1r75g) by [AI Anytime](https://www.youtube.com/@AIAnytime)
|
||||
- [QA over documents with Auto vector index selection with Langchain router chains](https://youtu.be/9G05qybShv8) by [echohive](https://www.youtube.com/@echohive)
|
||||
- [Build your own custom LLM application with `Bubble.io` & Langchain (No Code & Beginner friendly)](https://youtu.be/O7NhQGu1m6c) by [No Code Blackbox](https://www.youtube.com/@nocodeblackbox)
|
||||
- [Simple App to Question Your Docs: Leveraging `Streamlit`, `Hugging Face Spaces`, LangChain, and `Claude`!](https://youtu.be/X4YbNECRr7o) by [Chris Alexiuk](https://www.youtube.com/@chrisalexiuk)
|
||||
- [LANGCHAIN AI- `ConstitutionalChainAI` + Databutton AI ASSISTANT Web App](https://youtu.be/5zIU6_rdJCU) by [Avra](https://www.youtube.com/@Avra_b)
|
||||
- [LANGCHAIN AI AUTONOMOUS AGENT WEB APP - 👶 `BABY AGI` 🤖 with EMAIL AUTOMATION using `DATABUTTON`](https://youtu.be/cvAwOGfeHgw) by [Avra](https://www.youtube.com/@Avra_b)
|
||||
- [The Future of Data Analysis: Using A.I. Models in Data Analysis (LangChain)](https://youtu.be/v_LIcVyg5dk) by [Absent Data](https://www.youtube.com/@absentdata)
|
||||
- [Memory in LangChain | Deep dive (python)](https://youtu.be/70lqvTFh_Yg) by [Eden Marco](https://www.youtube.com/@EdenMarco)
|
||||
- [9 LangChain UseCases | Beginner's Guide | 2023](https://youtu.be/zS8_qosHNMw) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
|
||||
- [Use Large Language Models in Jupyter Notebook | LangChain | Agents & Indexes](https://youtu.be/JSe11L1a_QQ) by [Abhinaw Tiwari](https://www.youtube.com/@AbhinawTiwariAT)
|
||||
- [How to Talk to Your Langchain Agent | `11 Labs` + `Whisper`](https://youtu.be/N4k459Zw2PU) by [VRSEN](https://www.youtube.com/@vrsen)
|
||||
- [LangChain Deep Dive: 5 FUN AI App Ideas To Build Quickly and Easily](https://youtu.be/mPYEPzLkeks) by [James NoCode](https://www.youtube.com/@jamesnocode)
|
||||
- [LangChain 101: Models](https://youtu.be/T6c_XsyaNSQ) by [Mckay Wrigley](https://www.youtube.com/@realmckaywrigley)
|
||||
- [LangChain with JavaScript Tutorial #1 | Setup & Using LLMs](https://youtu.be/W3AoeMrg27o) by [Leon van Zyl](https://www.youtube.com/@leonvanzyl)
|
||||
- [LangChain Overview & Tutorial for Beginners: Build Powerful AI Apps Quickly & Easily (ZERO CODE)](https://youtu.be/iI84yym473Q) by [James NoCode](https://www.youtube.com/@jamesnocode)
|
||||
- [LangChain In Action: Real-World Use Case With Step-by-Step Tutorial](https://youtu.be/UO699Szp82M) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
|
||||
- [Summarizing and Querying Multiple Papers with LangChain](https://youtu.be/p_MQRWH5Y6k) by [Automata Learning Lab](https://www.youtube.com/@automatalearninglab)
|
||||
- [Using Langchain (and `Replit`) through `Tana`, ask `Google`/`Wikipedia`/`Wolfram Alpha` to fill out a table](https://youtu.be/Webau9lEzoI) by [Stian Håklev](https://www.youtube.com/@StianHaklev)
|
||||
- [Langchain PDF App (GUI) | Create a ChatGPT For Your `PDF` in Python](https://youtu.be/wUAUdEw5oxM) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
|
||||
- [Auto-GPT with LangChain 🔥 | Create Your Own Personal AI Assistant](https://youtu.be/imDfPmMKEjM) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
|
||||
- [Create Your OWN Slack AI Assistant with Python & LangChain](https://youtu.be/3jFXRNn2Bu8) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
|
||||
- [How to Create LOCAL Chatbots with GPT4All and LangChain [Full Guide]](https://youtu.be/4p1Fojur8Zw) by [Liam Ottley](https://www.youtube.com/@LiamOttley)
|
||||
- [Build a `Multilingual PDF` Search App with LangChain, `Cohere` and `Bubble`](https://youtu.be/hOrtuumOrv8) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
|
||||
- [Building a LangChain Agent (code-free!) Using `Bubble` and `Flowise`](https://youtu.be/jDJIIVWTZDE) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
|
||||
- [Build a LangChain-based Semantic PDF Search App with No-Code Tools Bubble and Flowise](https://youtu.be/s33v5cIeqA4) by [Menlo Park Lab](https://www.youtube.com/@menloparklab)
|
||||
- [LangChain Memory Tutorial | Building a ChatGPT Clone in Python](https://youtu.be/Cwq91cj2Pnc) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
|
||||
- [ChatGPT For Your DATA | Chat with Multiple Documents Using LangChain](https://youtu.be/TeDgIDqQmzs) by [Data Science Basics](https://www.youtube.com/@datasciencebasics)
|
||||
- [`Llama Index`: Chat with Documentation using URL Loader](https://youtu.be/XJRoDEctAwA) by [Merk](https://www.youtube.com/@heymichaeldaigler)
|
||||
- [Using OpenAI, LangChain, and `Gradio` to Build Custom GenAI Applications](https://youtu.be/1MsmqMg3yUc) by [David Hundley](https://www.youtube.com/@dkhundley)
|
||||
- [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)
|
||||
- [Build AI chatbot with custom knowledge base using OpenAI API and GPT Index](https://youtu.be/vDZAZuaXf48) by [Irina Nik](https://www.youtube.com/@irina_nik)
|
||||
- [Build Your Own Auto-GPT Apps with LangChain (Python Tutorial)](https://youtu.be/NYSWn1ipbgg) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
|
||||
- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
|
||||
- [Chat with a `CSV` | `LangChain Agents` Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
|
||||
- [Create Your Own ChatGPT with `PDF` Data in 5 Minutes (LangChain Tutorial)](https://youtu.be/au2WVVGUvc8) by [Liam Ottley](https://www.youtube.com/@LiamOttley)
|
||||
- [Build a Custom Chatbot with OpenAI: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU) by [Fabrikod](https://www.youtube.com/@fabrikod)
|
||||
- [`Flowise` is an open-source no-code UI visual tool to build 🦜🔗LangChain applications](https://youtu.be/CovAPtQPU0k) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA)
|
||||
- [LangChain & GPT 4 For Data Analysis: The `Pandas` Dataframe Agent](https://youtu.be/rFQ5Kmkd4jc) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
|
||||
- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw) by [Girlfriend GPT](https://www.youtube.com/@girlfriendGPT)
|
||||
- [How to build with Langchain 10x easier | ⛓️ LangFlow & `Flowise`](https://youtu.be/Ya1oGL7ZTvU) by [AI Jason](https://www.youtube.com/@AIJasonZ)
|
||||
- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg) by [Krish Naik](https://www.youtube.com/@krishnaik06)
|
||||
- ⛓ [Vector Embeddings Tutorial – Code Your Own AI Assistant with `GPT-4 API` + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=5uJhxoh2tvdnOXok) by [FreeCodeCamp.org](https://www.youtube.com/@freecodecamp)
|
||||
- ⛓ [Fully LOCAL `Llama 2` Q&A with LangChain](https://youtu.be/wgYctKFnQ74?si=UX1F3W-B3MqF4-K-) by [1littlecoder](https://www.youtube.com/@1littlecoder)
|
||||
- ⛓ [Fully LOCAL `Llama 2` Langchain on CPU](https://youtu.be/yhECvKMu8kM?si=IvjxwlA1c09VwHZ4) by [1littlecoder](https://www.youtube.com/@1littlecoder)
|
||||
- ⛓ [Build LangChain Audio Apps with Python in 5 Minutes](https://youtu.be/7w7ysaDz2W4?si=BvdMiyHhormr2-vr) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
|
||||
- ⛓ [`Voiceflow` & `Flowise`: Want to Beat Competition? New Tutorial with Real AI Chatbot](https://youtu.be/EZKkmeFwag0?si=-4dETYDHEstiK_bb) by [AI SIMP](https://www.youtube.com/@aisimp)
|
||||
- ⛓ [THIS Is How You Build Production-Ready AI Apps (`LangSmith` Tutorial)](https://youtu.be/tFXm5ijih98?si=lfiqpyaivxHFyI94) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
|
||||
- ⛓ [Build POWERFUL LLM Bots EASILY with Your Own Data - `Embedchain` - Langchain 2.0? (Tutorial)](https://youtu.be/jE24Y_GasE8?si=0yEDZt3BK5Q-LIuF) by [WorldofAI](https://www.youtube.com/@intheworldofai)
|
||||
- ⛓ [`Code Llama` powered Gradio App for Coding: Runs on CPU](https://youtu.be/AJOhV6Ryy5o?si=ouuQT6IghYlc1NEJ) by [AI Anytime](https://www.youtube.com/@AIAnytime)
|
||||
- ⛓ [LangChain Complete Course in One Video | Develop LangChain (AI) Based Solutions for Your Business](https://youtu.be/j9mQd-MyIg8?si=_wlNT3nP2LpDKztZ) by [UBprogrammer](https://www.youtube.com/@UBprogrammer)
|
||||
- ⛓ [How to Run `LLaMA` Locally on CPU or GPU | Python & Langchain & CTransformers Guide](https://youtu.be/SvjWDX2NqiM?si=DxFml8XeGhiLTzLV) by [Code With Prince](https://www.youtube.com/@CodeWithPrince)
|
||||
- ⛓ [PyData Heidelberg #11 - TimeSeries Forecasting & LLM Langchain](https://www.youtube.com/live/Glbwb5Hxu18?si=PIEY8Raq_C9PCHuW) by [PyData](https://www.youtube.com/@PyDataTV)
|
||||
- ⛓ [Prompt Engineering in Web Development | Using LangChain and Templates with OpenAI](https://youtu.be/pK6WzlTOlYw?si=fkcDQsBG2h-DM8uQ) by [Akamai Developer](https://www.youtube.com/@AkamaiDeveloper)
|
||||
- ⛓ [Retrieval-Augmented Generation (RAG) using LangChain and `Pinecone` - The RAG Special Episode](https://youtu.be/J_tCD_J6w3s?si=60Mnr5VD9UED9bGG) by [Generative AI and Data Science On AWS](https://www.youtube.com/@GenerativeAIOnAWS)
|
||||
- ⛓ [`LLAMA2 70b-chat` Multiple Documents Chatbot with Langchain & Streamlit |All OPEN SOURCE|Replicate API](https://youtu.be/vhghB81vViM?si=dszzJnArMeac7lyc) by [DataInsightEdge](https://www.youtube.com/@DataInsightEdge01)
|
||||
- ⛓ [Chatting with 44K Fashion Products: LangChain Opportunities and Pitfalls](https://youtu.be/Zudgske0F_s?si=8HSshHoEhh0PemJA) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
|
||||
- ⛓ [Structured Data Extraction from `ChatGPT` with LangChain](https://youtu.be/q1lYg8JISpQ?si=0HctzOHYZvq62sve) by [MG](https://www.youtube.com/@MG_cafe)
|
||||
- ⛓ [Chat with Multiple PDFs using `Llama 2`, `Pinecone` and LangChain (Free LLMs and Embeddings)](https://youtu.be/TcJ_tVSGS4g?si=FZYnMDJyoFfL3Z2i) by [Muhammad Moin](https://www.youtube.com/@muhammadmoinfaisal)
|
||||
- ⛓ [Integrate Audio into `LangChain.js` apps in 5 Minutes](https://youtu.be/hNpUSaYZIzs?si=Gb9h7W9A8lzfvFKi) by [AssemblyAI](https://www.youtube.com/@AssemblyAI)
|
||||
- ⛓ [`ChatGPT` for your data with Local LLM](https://youtu.be/bWrjpwhHEMU?si=uM6ZZ18z9og4M90u) by [Jacob Jedryszek](https://www.youtube.com/@jj09)
|
||||
- ⛓ [Training `Chatgpt` with your personal data using langchain step by step in detail](https://youtu.be/j3xOMde2v9Y?si=179HsiMU-hEPuSs4) by [NextGen Machines](https://www.youtube.com/@MayankGupta-kb5yc)
|
||||
- ⛓ [Use ANY language in `LangSmith` with REST](https://youtu.be/7BL0GEdMmgY?si=iXfOEdBLqXF6hqRM) by [Nerding I/O](https://www.youtube.com/@nerding_io)
|
||||
- ⛓ [How to Leverage the Full Potential of LLMs for Your Business with Langchain - Leon Ruddat](https://youtu.be/vZmoEa7oWMg?si=ZhMmydq7RtkZd56Q) by [PyData](https://www.youtube.com/@PyDataTV)
|
||||
- ⛓ [`ChatCSV` App: Chat with CSV files using LangChain and `Llama 2`](https://youtu.be/PvsMg6jFs8E?si=Qzg5u5gijxj933Ya) by [Muhammad Moin](https://www.youtube.com/@muhammadmoinfaisal)
|
||||
- ⛓ [Build Chat PDF app in Python with LangChain, OpenAI, Streamlit | Full project | Learn Coding](https://www.youtube.com/watch?v=WYzFzZg4YZI) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)
|
||||
- ⛓ [Build Eminem Bot App with LangChain, Streamlit, OpenAI | Full Python Project | Tutorial | AI ChatBot](https://www.youtube.com/watch?v=a2shHB4MRZ4) by [Jutsupoint](https://www.youtube.com/@JutsuPoint)
|
||||
|
||||
|
||||
### [Prompt Engineering and LangChain](https://www.youtube.com/watch?v=muXbPpG_ys4&list=PLEJK-H61Xlwzm5FYLDdKt_6yibO33zoMW) by [Venelin Valkov](https://www.youtube.com/@venelin_valkov)
|
||||
- [Getting Started with LangChain: Load Custom Data, Run OpenAI Models, Embeddings and `ChatGPT`](https://www.youtube.com/watch?v=muXbPpG_ys4)
|
||||
- [Loaders, Indexes & Vectorstores in LangChain: Question Answering on `PDF` files with `ChatGPT`](https://www.youtube.com/watch?v=FQnvfR8Dmr0)
|
||||
- [LangChain Models: `ChatGPT`, `Flan Alpaca`, `OpenAI Embeddings`, Prompt Templates & Streaming](https://www.youtube.com/watch?v=zy6LiK5F5-s)
|
||||
- [LangChain Chains: Use `ChatGPT` to Build Conversational Agents, Summaries and Q&A on Text With LLMs](https://www.youtube.com/watch?v=h1tJZQPcimM)
|
||||
- [Analyze Custom CSV Data with `GPT-4` using Langchain](https://www.youtube.com/watch?v=Ew3sGdX8at4)
|
||||
- [Build ChatGPT Chatbots with LangChain Memory: Understanding and Implementing Memory in Conversations](https://youtu.be/CyuUlf54wTs)
|
||||
Only videos with 40K+ views:
|
||||
|
||||
- [Using `ChatGPT` with YOUR OWN Data. This is magical. (LangChain `OpenAI API`)](https://youtu.be/9AXP7tCI9PI)
|
||||
- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg?si=pjXKhsHRzn10vOqX)
|
||||
- [`Hugging Face` + Langchain in 5 mins | Access 200k+ FREE AI models for your AI apps](https://youtu.be/_j7JEDWuqLE?si=psimQscN3qo2dOa9)
|
||||
- [LangChain Crash Course For Beginners | LangChain Tutorial](https://youtu.be/nAmC7SoVLd8?si=qJdvyG5-rnjqfdj1)
|
||||
- [Vector Embeddings Tutorial – Code Your Own AI Assistant with GPT-4 API + LangChain + NLP](https://youtu.be/yfHHvmaMkcA?si=UBP3yw50cLm3a2nj)
|
||||
- [Development with Large Language Models Tutorial – `OpenAI`, Langchain, Agents, `Chroma`](https://youtu.be/xZDB1naRUlk?si=v8J1q6oFHRyTkf7Y)
|
||||
- [Langchain: `PDF` Chat App (GUI) | ChatGPT for Your PDF FILES | Step-by-Step Tutorial](https://youtu.be/RIWbalZ7sTo?si=LbKsCcuyv0BtnrTY)
|
||||
- [Vector Search `RAG` Tutorial – Combine Your Data with LLMs with Advanced Search](https://youtu.be/JEBDfGqrAUA?si=pD7oxpfwWeJCxfBt)
|
||||
- [LangChain Crash Course for Beginners](https://youtu.be/lG7Uxts9SXs?si=Yte4S5afN7KNCw0F)
|
||||
- [Learn `RAG` From Scratch – Python AI Tutorial from a LangChain Engineer](https://youtu.be/sVcwVQRHIc8?si=_LN4g0vOgSdtlB3S)
|
||||
- [`Llama 2` in LangChain — FIRST Open Source Conversational Agent!](https://youtu.be/6iHVJyX2e50?si=rtq1maPrzWKHbwVV)
|
||||
- [LangChain Tutorial for Beginners | Generative AI Series](https://youtu.be/cQUUkZnyoD0?si=KYz-bvcocdqGh9f_)
|
||||
- [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=yS7T98VLfcWdkDek)
|
||||
- [LangChain Explained In 15 Minutes - A MUST Learn For Python Programmers](https://youtu.be/mrjq3lFz23s?si=wkQGcSKUJjuiiEPf)
|
||||
- [LLM Project | End to End LLM Project Using Langchain, `OpenAI` in Finance Domain](https://youtu.be/MoqgmWV1fm8?si=oVl-5kJVgd3a07Y_)
|
||||
- [What is LangChain?](https://youtu.be/1bUy-1hGZpI?si=NZ0D51VM5y-DhjGe)
|
||||
- [`RAG` + Langchain Python Project: Easy AI/Chat For Your Doc](https://youtu.be/tcqEUSNCn8I?si=RLcWPBVLIErRqdmU)
|
||||
- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg?si=X9qVazlXYucN_JBP)
|
||||
- [LangChain GEN AI Tutorial – 6 End-to-End Projects using OpenAI, Google `Gemini Pro`, `LLAMA2`](https://youtu.be/x0AnCE9SE4A?si=_92gJYm7kb-V2bi0)
|
||||
- [Complete Langchain GEN AI Crash Course With 6 End To End LLM Projects With OPENAI, `LLAMA2`, `Gemini Pro`](https://youtu.be/aWKrL4z5H6w?si=NVLi7Yiq0ccE7xXE)
|
||||
- [AI Leader Reveals The Future of AI AGENTS (LangChain CEO)](https://youtu.be/9ZhbA0FHZYc?si=1r4P6kRvKVvEhRgE)
|
||||
- [Learn How To Query Pdf using Langchain Open AI in 5 min](https://youtu.be/5Ghv-F1wF_0?si=ZZRjrWfeiFOVrcvu)
|
||||
- [Reliable, fully local RAG agents with `LLaMA3`](https://youtu.be/-ROS6gfYIts?si=75CXA8W_BbnkIxcV)
|
||||
- [Learn `LangChain.js` - Build LLM apps with JavaScript and `OpenAI`](https://youtu.be/HSZ_uaif57o?si=Icj-RAhwMT-vHaYA)
|
||||
- [LLM Project | End to End LLM Project Using LangChain, Google Palm In Ed-Tech Industry](https://youtu.be/AjQPRomyd-k?si=eC3NT6kn02Lhpz-_)
|
||||
- [Chatbot Answering from Your Own Knowledge Base: Langchain, `ChatGPT`, `Pinecone`, and `Streamlit`: | Code](https://youtu.be/nAKhxQ3hcMA?si=9Zd_Nd_jiYhtml5w)
|
||||
- [LangChain is AMAZING | Quick Python Tutorial](https://youtu.be/I4mFqyqFkxg?si=aJ66qh558OfNAczD)
|
||||
- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw?si=kZR-lnJwixeVrjmh)
|
||||
- [Using NEW `MPT-7B` in `Hugging Face` and LangChain](https://youtu.be/DXpk9K7DgMo?si=99JDpV_ueimwJhMi)
|
||||
- [LangChain - COMPLETE TUTORIAL - Basics to advanced concept!](https://youtu.be/a89vqgK-Qcs?si=0aVO2EOqsw7GE5e3)
|
||||
- [LangChain Agents: Simply Explained!](https://youtu.be/Xi9Ui-9qcPw?si=DCuG7nGx8dxcfhkx)
|
||||
- [Chat With Multiple `PDF` Documents With Langchain And Google `Gemini Pro`](https://youtu.be/uus5eLz6smA?si=YUwvHtaZsGeIl0WD)
|
||||
- [LLM Project | End to end LLM project Using Langchain, `Google Palm` in Retail Industry](https://youtu.be/4wtrl4hnPT8?si=_eOKPpdLfWu5UXMQ)
|
||||
- [Tutorial | Chat with any Website using Python and Langchain](https://youtu.be/bupx08ZgSFg?si=KRrjYZFnuLsstGwW)
|
||||
- [Prompt Engineering And LLM's With LangChain In One Shot-Generative AI](https://youtu.be/t2bSApmPzU4?si=87vPQQtYEWTyu2Kx)
|
||||
- [Build a Custom Chatbot with `OpenAI`: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU?si=gR1u3DUG9lvzBIKK)
|
||||
- [Search Your `PDF` App using Langchain, `ChromaDB`, and Open Source LLM: No OpenAI API (Runs on CPU)](https://youtu.be/rIV1EseKwU4?si=UxZEoXSiPai8fXgl)
|
||||
- [Building a `RAG` application from scratch using Python, LangChain, and the `OpenAI API`](https://youtu.be/BrsocJb-fAo?si=hvkh9iTGzJ-LnsX-)
|
||||
- [Function Calling via `ChatGPT API` - First Look With LangChain](https://youtu.be/0-zlUy7VUjg?si=Vc6LFseckEc6qvuk)
|
||||
- [Private GPT, free deployment! Langchain-Chachat helps you easily play with major mainstream AI models! | Zero Degree Commentary](https://youtu.be/3LLUyaHP-3I?si=AZumEeFXsvqaLl0f)
|
||||
- [Create a ChatGPT clone using `Streamlit` and LangChain](https://youtu.be/IaTiyQ2oYUQ?si=WbgsYmqPDnMidSUK)
|
||||
- [What's next for AI agents ft. LangChain's Harrison Chase](https://youtu.be/pBBe1pk8hf4?si=H4vdBF9nmkNZxiHt)
|
||||
- [`LangFlow`: Build Chatbots without Writing Code - LangChain](https://youtu.be/KJ-ux3hre4s?si=TJuDu4bAlva1myNL)
|
||||
- [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws?si=wymYad26VgigRkHy)
|
||||
- [`Ollama` meets LangChain](https://youtu.be/k_1pOF1mj8k?si=RlBiCrmaR3s7SnMK)
|
||||
- [End To End LLM Langchain Project using `Pinecone` Vector Database](https://youtu.be/erUfLIi9OFM?si=aHpuHXdIEmAfS4eF)
|
||||
- [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=FUs0OLVJpzKhut0h)
|
||||
- [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs?si=imgj534ggxlypS0d)
|
||||
|
||||
---------------------
|
||||
⛓ icon marks a new addition [last update 2024-02-04]
|
||||
[Updated 2024-05-16]
|
||||
|
||||
@@ -11,7 +11,7 @@ LangChain as a framework consists of a number of packages.
|
||||
|
||||
### `langchain-core`
|
||||
This package contains base abstractions of different components and ways to compose them together.
|
||||
The interfaces for core components like LLMs, vectorstores, retrievers and more are defined here.
|
||||
The interfaces for core components like LLMs, vector stores, retrievers and more are defined here.
|
||||
No third party integrations are defined here.
|
||||
The dependencies are kept purposefully very lightweight.
|
||||
|
||||
@@ -30,21 +30,21 @@ All chains, agents, and retrieval strategies here are NOT specific to any one in
|
||||
|
||||
This package contains third party integrations that are maintained by the LangChain community.
|
||||
Key partner packages are separated out (see below).
|
||||
This contains all integrations for various components (LLMs, vectorstores, retrievers).
|
||||
This contains all integrations for various components (LLMs, vector stores, retrievers).
|
||||
All dependencies in this package are optional to keep the package as lightweight as possible.
|
||||
|
||||
### [`langgraph`](/docs/langgraph)
|
||||
### [`langgraph`](https://langchain-ai.github.io/langgraph)
|
||||
|
||||
`langgraph` is an extension of `langchain` aimed at
|
||||
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for constructing more contr
|
||||
LangGraph exposes high level interfaces for creating common types of agents, as well as a low-level API for composing custom flows.
|
||||
|
||||
### [`langserve`](/docs/langserve)
|
||||
|
||||
A package to deploy LangChain chains as REST APIs. Makes it easy to get a production ready API up and running.
|
||||
|
||||
### [LangSmith](/docs/langsmith)
|
||||
### [LangSmith](https://docs.smith.langchain.com)
|
||||
|
||||
A developer platform that lets you debug, test, evaluate, and monitor LLM applications.
|
||||
|
||||
@@ -58,6 +58,7 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
|
||||
/>
|
||||
|
||||
## LangChain Expression Language (LCEL)
|
||||
<span data-heading-keywords="lcel"></span>
|
||||
|
||||
LangChain Expression Language, or LCEL, is a declarative way to chain LangChain components.
|
||||
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:
|
||||
@@ -66,7 +67,7 @@ LCEL was designed from day 1 to **support putting prototypes in production, with
|
||||
When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.
|
||||
|
||||
**Async support**
|
||||
Any chain built with LCEL can be called both with the synchronous API (eg. in your Jupyter notebook while prototyping) as well as with the asynchronous API (eg. in a [LangServe](/docs/langsmith) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server.
|
||||
Any chain built with LCEL can be called both with the synchronous API (eg. in your Jupyter notebook while prototyping) as well as with the asynchronous API (eg. in a [LangServe](/docs/langserve/) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server.
|
||||
|
||||
**Optimized parallel execution**
|
||||
Whenever your LCEL chains have steps that can be executed in parallel (eg if you fetch documents from multiple retrievers) we automatically do it, both in the sync and the async interfaces, for the smallest possible latency.
|
||||
@@ -80,23 +81,24 @@ For more complex chains it’s often very useful to access the results of interm
|
||||
**Input and output schemas**
|
||||
Input and output schemas give every LCEL chain Pydantic and JSONSchema schemas inferred from the structure of your chain. This can be used for validation of inputs and outputs, and is an integral part of LangServe.
|
||||
|
||||
[**Seamless LangSmith tracing**](/docs/langsmith)
|
||||
[**Seamless LangSmith tracing**](https://docs.smith.langchain.com)
|
||||
As your chains get more and more complex, it becomes increasingly important to understand what exactly is happening at every step.
|
||||
With LCEL, **all** steps are automatically logged to [LangSmith](/docs/langsmith/) for maximum observability and debuggability.
|
||||
With LCEL, **all** steps are automatically logged to [LangSmith](https://docs.smith.langchain.com/) for maximum observability and debuggability.
|
||||
|
||||
[**Seamless LangServe deployment**](/docs/langserve)
|
||||
Any chain created with LCEL can be easily deployed using [LangServe](/docs/langserve).
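
Putting the pieces above together, a minimal LCEL chain might look like the following sketch (the OpenAI chat model is an illustrative assumption; any chat model can be substituted):

```python
# A sketch: prompt | model | output parser composed with LCEL's pipe operator.
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a one-line joke about {topic}")
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo") | StrOutputParser()

print(chain.invoke({"topic": "vector stores"}))
```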
|
||||
|
||||
### Runnable interface
|
||||
<span data-heading-keywords="invoke"></span>
|
||||
|
||||
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
|
||||
|
||||
This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way.
|
||||
The standard interface includes:
|
||||
|
||||
- [`stream`](#stream): stream back chunks of the response
|
||||
- [`invoke`](#invoke): call the chain on an input
|
||||
- [`batch`](#batch): call the chain on a list of inputs
|
||||
- `stream`: stream back chunks of the response
|
||||
- `invoke`: call the chain on an input
|
||||
- `batch`: call the chain on a list of inputs
|
||||
|
||||
These also have corresponding async methods that should be used with [asyncio](https://docs.python.org/3/library/asyncio.html) `await` syntax for concurrency:
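
The sketch below reuses the `chain` from the LCEL example above (an assumption for brevity) to show the sync methods next to their async counterparts:

```python
# A sketch: the standard Runnable methods and their async counterparts,
# reusing the `chain` defined in the LCEL example above.
import asyncio

single = chain.invoke({"topic": "agents"})                       # invoke
several = chain.batch([{"topic": "LLMs"}, {"topic": "LCEL"}])    # batch
for chunk in chain.stream({"topic": "retrievers"}):              # stream
    print(chunk, end="", flush=True)

async def main() -> None:
    also_single = await chain.ainvoke({"topic": "prompts"})      # async invoke
    async for chunk in chain.astream({"topic": "tools"}):        # async stream
        print(chunk, end="", flush=True)

asyncio.run(main())
```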
|
||||
|
||||
@@ -128,30 +130,72 @@ LangChain provides standard, extendable interfaces and external integrations for
|
||||
Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.
|
||||
|
||||
### Chat models
|
||||
<span data-heading-keywords="chat model,chat models"></span>
|
||||
|
||||
Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).
|
||||
These are traditionally newer models (older models are generally `LLMs`, see above).
|
||||
These are traditionally newer models (older models are generally `LLMs`, see below).
|
||||
Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.
|
||||
|
||||
Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input.
|
||||
This makes them interchangeable with LLMs (and simpler to use).
|
||||
When a string is passed in as input, it will be converted to a HumanMessage under the hood before being passed to the underlying model.
|
||||
Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs.
|
||||
|
||||
LangChain does not provide any ChatModels, rather we rely on third party integrations.
|
||||
When a string is passed in as input, it is converted to a `HumanMessage` and then passed to the underlying model.
|
||||
|
||||
LangChain does not host any Chat Models, rather we rely on third party integrations.
|
||||
|
||||
We have some standardized parameters when constructing ChatModels:
|
||||
- `model`: the name of the model
|
||||
- `temperature`: the sampling temperature
|
||||
- `timeout`: request timeout
|
||||
- `max_tokens`: max tokens to generate
|
||||
- `stop`: default stop sequences
|
||||
- `max_retries`: max number of times to retry requests
|
||||
- `api_key`: API key for the model provider
|
||||
- `base_url`: endpoint to send requests to
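As a rough sketch, this is how the standard parameters might look when constructing a chat model with the `langchain-openai` integration (the model name and values here are just examples):

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(
    model="gpt-4o",      # name of the model
    temperature=0,       # sampling temperature
    timeout=30,          # request timeout in seconds
    max_tokens=256,      # max tokens to generate
    stop=["\n\n"],       # default stop sequences
    max_retries=2,       # max number of times to retry requests
    # api_key and base_url fall back to environment configuration if omitted
)
```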
|
||||
|
||||
Some important things to note:
|
||||
- standard params only apply to model providers that expose parameters with the intended functionality. For example, some providers do not expose a configuration for maximum output tokens, so max_tokens can't be supported on these.
|
||||
- standard params are currently only enforced on integrations that have their own integration packages (e.g. `langchain-openai`, `langchain-anthropic`, etc.), they're not enforced on models in ``langchain-community``.
|
||||
|
||||
ChatModels also accept other parameters that are specific to that integration. To find all the parameters supported by a ChatModel head to the API reference for that model.
|
||||
|
||||
:::important
|
||||
**Tool Calling** Some chat models have been fine-tuned for tool calling and provide a dedicated API for tool calling.
|
||||
Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.
|
||||
Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.
|
||||
:::
|
||||
|
||||
For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).
|
||||
|
||||
#### Multimodality
|
||||
|
||||
Some chat models are multimodal, accepting images, audio and even video as inputs. These are still less common, meaning model providers haven't standardized on the "best" way to define the API. Multimodal **outputs** are even less common. As such, we've kept our multimodal abstractions fairly lightweight and plan to further solidify the multimodal APIs and interaction patterns as the field matures.
|
||||
|
||||
In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
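For example, an image input in the OpenAI content-block format looks roughly like this (assuming `model` is a multimodal chat model such as `ChatOpenAI`; the URL is a placeholder):

```python
from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the weather in this image"},
        {"type": "image_url", "image_url": {"url": "https://example.com/sky.jpg"}},
    ]
)
# response = model.invoke([message])
```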
|
||||
|
||||
For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).
|
||||
|
||||
For a full list of LangChain model providers with multimodal models, [check out this table](/docs/integrations/chat/#advanced-features).
|
||||
|
||||
### LLMs
|
||||
<span data-heading-keywords="llm,llms"></span>
|
||||
|
||||
:::caution
|
||||
Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),
|
||||
even for non-chat use cases.
|
||||
|
||||
You are probably looking for [the section above instead](/docs/concepts/#chat-models).
|
||||
:::
|
||||
|
||||
Language models that take a string as input and return a string.
|
||||
These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).
|
||||
|
||||
Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
|
||||
This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
|
||||
When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.
|
||||
|
||||
LangChain does not host any LLMs, rather we rely on third party integrations.
|
||||
|
||||
For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).
|
||||
|
||||
### Messages
|
||||
|
||||
The `content` property describes the content of the message.
|
||||
This can be a few different things:
|
||||
|
||||
- A string (most models deal with this type of content)
|
||||
- A List of dictionaries (this is used for multimodal input, where the dictionary contains information about that input type and that input location)
|
||||
|
||||
#### HumanMessage
|
||||
|
||||
This represents a message from the user.

#### ToolMessage

This represents the result of a tool call. This is distinct from a FunctionMessage.
|
||||
|
||||
|
||||
### Prompt templates
|
||||
<span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
|
||||
|
||||
Prompt templates help to translate user input and parameters into instructions for a language model.
|
||||
This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.
|
||||
|
||||
Prompt Templates take as input a dictionary, where each key represents a variable in the prompt template to fill in.
|
||||
Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or a list of messages.
|
||||
The reason this PromptValue exists is to make it easy to switch between strings and messages.
|
||||
|
||||
There are a few different types of prompt templates:
|
||||
|
||||
#### String PromptTemplates
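These prompt templates are used to format a single string. A minimal sketch:

```python
from langchain_core.prompts import PromptTemplate

prompt_template = PromptTemplate.from_template("Tell me a joke about {topic}")

prompt_template.invoke({"topic": "cats"})
```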
|
||||
|
||||
#### ChatPromptTemplates

These prompt templates are used to format a list of messages. For example:

```python
from langchain_core.prompts import ChatPromptTemplate

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant"),
    ("user", "Tell me a joke about {topic}")
])

prompt_template.invoke({"topic": "cats"})
```
|
||||
In the above example, this ChatPromptTemplate will construct two messages when called.
The first is a system message, that has no variables to format.
|
||||
The second is a HumanMessage, and will be formatted by the `topic` variable the user passes in.
|
||||
|
||||
#### MessagesPlaceholder
|
||||
<span data-heading-keywords="messagesplaceholder"></span>
|
||||
|
||||
This prompt template is responsible for adding a list of messages in a particular place.
|
||||
In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.
|
||||
```python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant"),
    MessagesPlaceholder("msgs")
])

prompt_template.invoke({"msgs": [HumanMessage(content="hi!")]})
```
|
||||
|
||||
For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
|
||||
|
||||
### Example selectors
|
||||
One common prompting technique for achieving better performance is to include examples as part of the prompt.
|
||||
This gives the language model concrete examples of how it should behave.
|
||||
Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.
|
||||
Example Selectors are classes responsible for selecting and then formatting examples into prompts.
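As an illustrative sketch, a custom selector only needs to implement `add_example` and `select_examples` from the `BaseExampleSelector` interface (the closest-length heuristic here is made up purely for illustration):

```python
from langchain_core.example_selectors.base import BaseExampleSelector


class ClosestLengthExampleSelector(BaseExampleSelector):
    """Pick the example whose input is closest in length to the new input."""

    def __init__(self, examples):
        self.examples = examples

    def add_example(self, example):
        self.examples.append(example)

    def select_examples(self, input_variables):
        target = len(input_variables["input"])
        return [min(self.examples, key=lambda ex: abs(len(ex["input"]) - target))]
```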
|
||||
|
||||
For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).
|
||||
|
||||
### Output parsers
|
||||
<span data-heading-keywords="output parser"></span>
|
||||
|
||||
|
||||
|
||||
LangChain has lots of different types of output parsers. This is a list of output parsers LangChain supports.

| Name | Supports Streaming | Has Format Instructions | Calls LLM | Input Type | Output Type | Description |
|------|--------------------|-------------------------|-----------|------------|-------------|-------------|
| [Datetime](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) | | ✅ | | `str` \| `Message` | `datetime.datetime` | Parses response into a datetime string. |
| [Structured](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) | | ✅ | | `str` \| `Message` | `Dict[str, str]` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. |
|
||||
|
||||
For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers).
|
||||
|
||||
### Chat history
|
||||
Most LLM applications have a conversational interface.
|
||||
An essential component of a conversation is being able to refer to information introduced earlier in the conversation.
|
||||
At bare minimum, a conversational system should be able to access some window of past messages directly.
|
||||
|
||||
The concept of `ChatHistory` refers to a class in LangChain which can be used to wrap an arbitrary chain.
|
||||
This `ChatHistory` will keep track of inputs and outputs of the underlying chain, and append them as messages to a message database.
|
||||
Future interactions will then load those messages and pass them into the chain as part of the input.
|
||||
|
||||
### Documents
|
||||
<span data-heading-keywords="document,documents"></span>
|
||||
|
||||
A Document object in LangChain contains information about some data. It has two attributes:
|
||||
|
||||
- `page_content: str`: The content of this document. Currently is only a string.
- `metadata: dict`: Arbitrary metadata associated with this document. Can track the document id, file name, etc.
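For instance (the metadata keys here are arbitrary):

```python
from langchain_core.documents import Document

doc = Document(
    page_content="LangChain is a framework for building LLM applications.",
    metadata={"source": "example.txt", "page": 1},
)
```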
|
||||
|
||||
### Document loaders
|
||||
<span data-heading-keywords="document loader,document loaders"></span>
|
||||
|
||||
These classes load Document objects. LangChain has hundreds of integrations with various data sources to load data from: Slack, Notion, Google Drive, etc.
|
||||
|
||||
```python
from langchain_community.document_loaders.csv_loader import CSVLoader

loader = CSVLoader(
    ...  # <-- Integration specific parameters here
)
data = loader.load()
```
|
||||
|
||||
For specifics on how to use document loaders, see the [relevant how-to guides here](/docs/how_to/#document-loaders).
|
||||
|
||||
### Text splitters
|
||||
|
||||
Once you've loaded documents, you'll often want to transform them to better suit your application. The simplest example is you may want to split a long document into smaller chunks that can fit into your model's context window. LangChain has a number of built-in document transformers that make it easy to split, combine, filter, and otherwise manipulate documents.
|
||||
That means there are two different axes along which you can customize your text splitter:
|
||||
1. How the text is split
|
||||
2. How the chunk size is measured
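For example, a sketch using the recursive character splitter (the chunk sizes here are arbitrary, and `some_long_text` is assumed to be a string you have loaded):

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
chunks = text_splitter.split_text(some_long_text)  # list of smaller strings
```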
|
||||
|
||||
For specifics on how to use text splitters, see the [relevant how-to guides here](/docs/how_to/#text-splitters).
|
||||
|
||||
### Embedding models
|
||||
<span data-heading-keywords="embedding,embeddings"></span>
|
||||
|
||||
Embedding models create a vector representation of a piece of text. You can think of a vector as an array of numbers that captures the semantic meaning of the text.
|
||||
By representing the text in this way, you can perform mathematical operations that allow you to do things like search for other pieces of text that are most similar in meaning.
|
||||
These natural language search capabilities underpin many types of [context retrieval](/docs/concepts/#retrieval),
|
||||
where we provide an LLM with the relevant data it needs to effectively respond to a query.
|
||||
|
||||

|
||||
|
||||
The `Embeddings` class is a class designed for interfacing with text embedding models. There are many different embedding model providers (OpenAI, Cohere, Hugging Face, etc) and local models, and this class is designed to provide a standard interface for all of them.
|
||||
|
||||
The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
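A minimal sketch of the two methods, using the `langchain-openai` integration as an example (any `Embeddings` implementation exposes the same interface):

```python
from langchain_openai import OpenAIEmbeddings

embeddings_model = OpenAIEmbeddings()

# One vector per input text
doc_vectors = embeddings_model.embed_documents(["Hello, world!", "Goodbye, world!"])

# A single vector for the query
query_vector = embeddings_model.embed_query("What was said about the world?")
```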
|
||||
|
||||
For specifics on how to use embedding models, see the [relevant how-to guides here](/docs/how_to/#embedding-models).
|
||||
|
||||
### Vector stores
|
||||
<span data-heading-keywords="vector,vectorstore,vectorstores,vector store,vector stores"></span>
|
||||
|
||||
One of the most common ways to store and search over unstructured data is to embed it and store the resulting embedding vectors,
|
||||
and then at query time to embed the unstructured query and retrieve the embedding vectors that are 'most similar' to the embedded query.
|
||||
A vector store takes care of storing embedded data and performing vector search for you.
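As a sketch, using FAISS and OpenAI embeddings as stand-ins for any vector store and embedding model pairing:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

# Embed and index two small documents, then run a similarity search.
vectorstore = FAISS.from_documents(
    [
        Document(page_content="LangChain supports many vector stores"),
        Document(page_content="Paris is the capital of France"),
    ],
    OpenAIEmbeddings(),
)
results = vectorstore.similarity_search("Which city is the capital of France?", k=1)
```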
|
||||
|
||||
Most vector stores can also store metadata about embedded vectors and support filtering on that metadata before
|
||||
similarity search, allowing you more control over returned documents.
|
||||
|
||||
Vector stores can be converted to the retriever interface by doing:
|
||||
|
||||
```python
vectorstore = MyVectorStore()
retriever = vectorstore.as_retriever()
```
|
||||
|
||||
For specifics on how to use vector stores, see the [relevant how-to guides here](/docs/how_to/#vector-stores).
|
||||
|
||||
### Retrievers
|
||||
<span data-heading-keywords="retriever,retrievers"></span>
|
||||
|
||||
A retriever is an interface that returns documents given an unstructured query.
|
||||
It is more general than a vector store.
|
||||
A retriever does not need to be able to store documents, only to return (or retrieve) them.
|
||||
Retrievers can be created from vector stores, but are also broad enough to include [Wikipedia search](/docs/integrations/retrievers/wikipedia/) and [Amazon Kendra](/docs/integrations/retrievers/amazon_kendra_retriever/).
|
||||
|
||||
Retrievers accept a string query as input and return a list of Documents as output.
|
||||
|
||||
For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).
|
||||
|
||||
### Tools
|
||||
<span data-heading-keywords="tool,tools"></span>
|
||||
|
||||
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
|
||||
|
||||
A tool consists of the following components:
|
||||
|
||||
1. The name of the tool
|
||||
2. A description of what the tool does
|
||||
3. JSON schema of what the inputs to the tool are
|
||||
4. The function to call
|
||||
5. Whether the result of a tool should be returned directly to the user (only relevant for agents)
|
||||
|
||||
The name, description, and JSON schema are provided as context to the LLM, allowing the LLM to determine how to use the tool appropriately.
|
||||
|
||||
Given a list of available tools and a prompt, an LLM can request that one or more tools be invoked with appropriate arguments.
|
||||
|
||||
Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:
|
||||
|
||||
- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
|
||||
- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
|
||||
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
|
||||
- Simpler tools are generally easier for models to use than more complex tools.
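For example, a minimal sketch of defining a tool with the `@tool` decorator, which derives the name, description, and JSON schema from the function signature and docstring:

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


print(multiply.name)   # name inferred from the function name
print(multiply.args)   # JSON schema inferred from the signature
multiply.invoke({"a": 2, "b": 3})  # -> 6
```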
|
||||
|
||||
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
|
||||
|
||||
### Toolkits
|
||||
|
||||
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
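Every toolkit exposes a `get_tools` method that returns its tools. As an illustration, a sketch using the SQL toolkit (the sqlite URI and model name are placeholders):

```python
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI

db = SQLDatabase.from_uri("sqlite:///example.db")  # placeholder database
toolkit = SQLDatabaseToolkit(db=db, llm=ChatOpenAI(model="gpt-4o"))

# Get the list of tools contained in the toolkit
tools = toolkit.get_tools()
```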
### Agents
By themselves, language models can't take actions - they just output text.
|
||||
A big use case for LangChain is creating **agents**.
|
||||
Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.
|
||||
The results of those actions can then be fed back into the agent and it determines whether more actions are needed, or whether it is okay to finish.
|
||||
|
||||
[LangGraph](https://github.com/langchain-ai/langgraph) is an extension of LangChain specifically aimed at creating highly controllable and customizable agents.
|
||||
In order to solve that we built LangGraph to be this flexible, highly-controllable agent framework.
|
||||
|
||||
If you are still using AgentExecutor, do not fear: we still have a guide on [how to use AgentExecutor](/docs/how_to/agent_executor).
|
||||
It is recommended, however, that you start to transition to LangGraph.
|
||||
In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
|
||||
|
||||
### Callbacks
|
||||
|
||||
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
|
||||
|
||||
You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.
|
||||
|
||||
#### Callback Events
|
||||
|
||||
| Event | Event Trigger | Associated Method |
|
||||
|------------------|---------------------------------------------|-----------------------|
|
||||
| Chat model start | When a chat model starts | `on_chat_model_start` |
|
||||
| LLM start | When an LLM starts | `on_llm_start` |
| LLM new token | When an LLM or chat model emits a new token | `on_llm_new_token` |
| LLM end | When an LLM or chat model ends | `on_llm_end` |
| LLM error | When an LLM or chat model errors | `on_llm_error` |
|
||||
| Chain start | When a chain starts running | `on_chain_start` |
|
||||
| Chain end | When a chain ends | `on_chain_end` |
|
||||
| Chain error | When a chain errors | `on_chain_error` |
|
||||
| Tool start | When a tool starts running | `on_tool_start` |
|
||||
| Tool end | When a tool ends | `on_tool_end` |
|
||||
| Tool error | When a tool errors | `on_tool_error` |
|
||||
| Agent action | When an agent takes an action | `on_agent_action` |
|
||||
| Agent finish | When an agent ends | `on_agent_finish` |
|
||||
| Retriever start | When a retriever starts | `on_retriever_start` |
|
||||
| Retriever end | When a retriever ends | `on_retriever_end` |
|
||||
| Retriever error | When a retriever errors | `on_retriever_error` |
|
||||
| Text | When arbitrary text is run | `on_text` |
|
||||
| Retry | When a retry event is run | `on_retry` |
|
||||
|
||||
#### Callback handlers
|
||||
|
||||
Callback handlers can either be `sync` or `async`:
|
||||
|
||||
* Sync callback handlers implement the [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
|
||||
* Async callback handlers implement the [AsyncCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.
|
||||
|
||||
During run-time LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html)) which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.
|
||||
|
||||
#### Passing callbacks
|
||||
|
||||
The callbacks are available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:
|
||||
|
||||
- **Request time callbacks**: Passed at the time of the request in addition to the input data.
|
||||
Available on all standard `Runnable` objects. These callbacks are INHERITED by all children
|
||||
of the object they are defined on. For example, `chain.invoke({"number": 25}, {"callbacks": [handler]})`.
|
||||
- **Constructor callbacks**: `chain = TheNameOfSomeChain(callbacks=[handler])`. These callbacks
|
||||
are passed as arguments to the constructor of the object. The callbacks are scoped
|
||||
only to the object they are defined on, and are **not** inherited by any children of the object.
|
||||
|
||||
:::warning
|
||||
Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children
|
||||
of the object.
|
||||
:::
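For illustration, a minimal sketch of a custom handler passed at request time (the chain itself is assumed to exist elsewhere):

```python
from langchain_core.callbacks import BaseCallbackHandler


class LoggingHandler(BaseCallbackHandler):
    def on_chat_model_start(self, serialized, messages, **kwargs):
        print("Chat model started")

    def on_llm_end(self, response, **kwargs):
        print("LLM or chat model finished")


# Request time callbacks are inherited by all children of the chain:
# chain.invoke({"topic": "cats"}, config={"callbacks": [LoggingHandler()]})
```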
|
||||
|
||||
If you're creating a custom chain or runnable, you need to remember to propagate request time
|
||||
callbacks to any child objects.
|
||||
|
||||
:::important Async in Python<=3.10
|
||||
|
||||
Any `RunnableLambda`, `RunnableGenerator`, or `Tool` that invokes other runnables
|
||||
and is running async in python<=3.10, will have to propagate callbacks to child
|
||||
objects manually. This is because LangChain cannot automatically propagate
|
||||
callbacks to child objects in this case.
|
||||
|
||||
This is a common reason why you may fail to see events being emitted from custom
|
||||
runnables or tools.
|
||||
:::
|
||||
|
||||
For specifics on how to use callbacks, see the [relevant how-to guides here](/docs/how_to/#callbacks).
|
||||
|
||||
## Techniques
|
||||
|
||||
### Streaming
|
||||
<span data-heading-keywords="stream,streaming"></span>
|
||||
|
||||
Individual LLM calls often run for much longer than traditional resource requests.
|
||||
This compounds when you build more complex chains or agents that require multiple reasoning steps.
|
||||
|
||||
Fortunately, LLMs generate output iteratively, which means it's possible to show sensible intermediate results
|
||||
before the final response is ready. Consuming output as soon as it becomes available has therefore become a vital part of the UX
|
||||
around building apps with LLMs to help alleviate latency issues, and LangChain aims to have first-class support for streaming.
|
||||
|
||||
Below, we'll discuss some concepts and considerations around streaming in LangChain.
|
||||
|
||||
#### `.stream()` and `.astream()`
|
||||
|
||||
Most modules in LangChain include the `.stream()` method (and the equivalent `.astream()` method for [async](https://docs.python.org/3/library/asyncio.html) environments) as an ergonomic streaming interface.
|
||||
`.stream()` returns an iterator, which you can consume with a simple `for` loop. Here's an example with a chat model:
|
||||
|
||||
```python
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(model="claude-3-sonnet-20240229")
|
||||
|
||||
for chunk in model.stream("what color is the sky?"):
|
||||
print(chunk.content, end="|", flush=True)
|
||||
```
|
||||
|
||||
For models (or other components) that don't support streaming natively, this iterator would just yield a single chunk, but
|
||||
you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode
|
||||
without the need to provide additional config.
|
||||
|
||||
The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html).
|
||||
Because this method is part of [LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel),
|
||||
you can handle formatting differences from different outputs using an [output parser](/docs/concepts/#output-parsers) to transform
|
||||
each yielded chunk.
|
||||
|
||||
You can check out [this guide](/docs/how_to/streaming/#using-stream) for more detail on how to use `.stream()`.
|
||||
|
||||
#### `.astream_events()`
|
||||
<span data-heading-keywords="astream_events,stream_events,stream events"></span>
|
||||
|
||||
While the `.stream()` method is intuitive, it can only return the final generated value of your chain. This is fine for single LLM calls,
|
||||
but as you build more complex chains of several LLM calls together, you may want to use the intermediate values of
|
||||
the chain alongside the final output - for example, returning sources alongside the final generation when building a chat
|
||||
over documents app.
|
||||
|
||||
There are ways to do this [using callbacks](/docs/concepts/#callbacks-1), or by constructing your chain in such a way that it passes intermediate
|
||||
values to the end with something like chained [`.assign()`](/docs/how_to/passthrough/) calls, but LangChain also includes an
|
||||
`.astream_events()` method that combines the flexibility of callbacks with the ergonomics of `.stream()`. When called, it returns an iterator
|
||||
which yields [various types of events](/docs/how_to/streaming/#event-reference) that you can filter and process according
|
||||
to the needs of your project.
|
||||
|
||||
Here's one small example that prints just events containing streamed chat model output:
|
||||
|
||||
```python
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(model="claude-3-sonnet-20240229")
|
||||
|
||||
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
|
||||
parser = StrOutputParser()
|
||||
chain = prompt | model | parser
|
||||
|
||||
async for event in chain.astream_events({"topic": "parrot"}, version="v2"):
|
||||
kind = event["event"]
|
||||
if kind == "on_chat_model_stream":
|
||||
print(event, end="|", flush=True)
|
||||
```
|
||||
|
||||
You can roughly think of it as an iterator over callback events (though the format differs) - and you can use it on almost all LangChain components!
|
||||
|
||||
See [this guide](/docs/how_to/streaming/#using-stream-events) for more detailed information on how to use `.astream_events()`,
|
||||
including a table listing available events.
|
||||
|
||||
#### Callbacks
|
||||
|
||||
The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a
|
||||
callback handler that handles the [`on_llm_new_token`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any
|
||||
[LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls
|
||||
the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. an HTTP response.
|
||||
You can also handle the [`on_llm_end`](https://api.python.langchain.com/en/latest/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup.
|
||||
|
||||
You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks.
|
||||
|
||||
Callbacks were the first technique for streaming introduced in LangChain. While powerful and generalizable,
|
||||
they can be unwieldy for developers. For example:
|
||||
|
||||
- You need to explicitly initialize and manage some aggregator or other stream to collect results.
|
||||
- The execution order isn't explicitly guaranteed, and you could theoretically have a callback run after the `.invoke()` method finishes.
|
||||
- Providers would often make you pass an additional parameter to stream outputs instead of returning them all at once.
|
||||
- You would often ignore the result of the actual model call in favor of callback results.
|
||||
|
||||
#### Tokens
|
||||
|
||||
The unit that most model providers use to measure input and output is called a **token**.
|
||||
Tokens are the basic units that language models read and generate when processing or producing text.
|
||||
The exact definition of a token can vary depending on the specific way the model was trained -
|
||||
for instance, in English, a token could be a single word like "apple", or a part of a word like "app".
|
||||
|
||||
When you send a model a prompt, the words and characters in the prompt are encoded into tokens using a **tokenizer**.
|
||||
The model then streams back generated output tokens, which the tokenizer decodes into human-readable text.
|
||||
The below example shows how OpenAI models tokenize `LangChain is cool!`:
|
||||
|
||||

|
||||
|
||||
You can see that it gets split into 5 different tokens, and that the boundaries between tokens are not exactly the same as word boundaries.
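You can reproduce this kind of tokenization with a tokenizer library; for example, with `tiktoken` (not a LangChain dependency, shown purely for illustration):

```python
import tiktoken

encoding = tiktoken.encoding_for_model("gpt-4o")
tokens = encoding.encode("LangChain is cool!")

print(len(tokens))                              # number of tokens
print([encoding.decode([t]) for t in tokens])   # the text of each token
```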
|
||||
|
||||
The reason language models use tokens rather than something more immediately intuitive like "characters"
|
||||
has to do with how they process and understand text. At a high-level, language models iteratively predict their next generated output based on
|
||||
the initial input and their previous generations. Training the model using tokens allows language models to handle linguistic
|
||||
units (like words or subwords) that carry meaning, rather than individual characters, which makes it easier for the model
|
||||
to learn and understand the structure of the language, including grammar and context.
|
||||
Furthermore, using tokens can also improve efficiency, since the model processes fewer units of text compared to character-level processing.
|
||||
|
||||
### Structured output
|
||||
|
||||
LLMs are capable of generating arbitrary text. This enables the model to respond appropriately to a wide
|
||||
range of inputs, but for some use-cases, it can be useful to constrain the LLM's output
|
||||
to a specific format or structure. This is referred to as **structured output**.
|
||||
|
||||
For example, if the output is to be stored in a relational database,
|
||||
it is much easier if the model generates output that adheres to a defined schema or format.
|
||||
[Extracting specific information](/docs/tutorials/extraction/) from unstructured text is another
|
||||
case where this is particularly useful. Most commonly, the output format will be JSON,
|
||||
though other formats such as [YAML](/docs/how_to/output_parser_yaml/) can be useful too. Below, we'll discuss
|
||||
a few ways to get structured output from models in LangChain.
|
||||
|
||||
#### `.with_structured_output()`
|
||||
|
||||
For convenience, some LangChain chat models support a `.with_structured_output()` method.
|
||||
This method only requires a schema as input, and returns a dict or Pydantic object.
|
||||
Generally, this method is only present on models that support one of the more advanced methods described below,
|
||||
and will use one of them under the hood. It takes care of importing a suitable output parser and
|
||||
formatting the schema in the right format for the model.
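A rough sketch, assuming an OpenAI chat model and a Pydantic schema (the model name and fields are illustrative):

```python
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI


class Joke(BaseModel):
    """A joke to tell the user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")


model = ChatOpenAI(model="gpt-4o")
structured_model = model.with_structured_output(Joke)

structured_model.invoke("Tell me a joke about cats")  # -> Joke(setup=..., punchline=...)
```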
|
||||
|
||||
For more information, check out this [how-to guide](/docs/how_to/structured_output/#the-with_structured_output-method).
|
||||
|
||||
#### Raw prompting
|
||||
|
||||
The most intuitive way to get a model to structure output is to ask nicely.
|
||||
In addition to your query, you can give instructions describing what kind of output you'd like, then
|
||||
parse the output using an [output parser](/docs/concepts/#output-parsers) to convert the raw
|
||||
model message or string output into something more easily manipulated.
|
||||
|
||||
The biggest benefit to raw prompting is its flexibility:
|
||||
|
||||
- Raw prompting does not require any special model features, only sufficient reasoning capability to understand
|
||||
the passed schema.
|
||||
- You can prompt for any format you'd like, not just JSON. This can be useful if the model you
|
||||
are using is more heavily trained on a certain type of data, such as XML or YAML.
|
||||
|
||||
However, there are some drawbacks too:
|
||||
|
||||
- LLMs are non-deterministic, and prompting an LLM to consistently output data in exactly the correct format
|
||||
for smooth parsing can be surprisingly difficult and model-specific.
|
||||
- Individual models have quirks depending on the data they were trained on, and optimizing prompts can be quite difficult.
|
||||
Some may be better at interpreting [JSON schema](https://json-schema.org/), others may be best with TypeScript definitions,
|
||||
and still others may prefer XML.
|
||||
|
||||
While we'll next go over some ways that you can take advantage of features offered by
|
||||
model providers to increase reliability, prompting techniques remain important for tuning your
|
||||
results no matter what method you choose.
|
||||
|
||||
#### JSON mode
|
||||
<span data-heading-keywords="json mode"></span>
|
||||
|
||||
Some models, such as [Mistral](/docs/integrations/chat/mistralai/), [OpenAI](/docs/integrations/chat/openai/),
|
||||
[Together AI](/docs/integrations/chat/together/) and [Ollama](/docs/integrations/chat/ollama/),
|
||||
support a feature called **JSON mode**, usually enabled via config.
|
||||
|
||||
When enabled, JSON mode will constrain the model's output to always be some sort of valid JSON.
|
||||
Often they require some custom prompting, but it's usually much less burdensome and along the lines of,
|
||||
`"you must always return JSON"`, and the [output is easier to parse](/docs/how_to/output_parser_json/).
|
||||
|
||||
It's also generally simpler and more commonly available than tool calling.
|
||||
|
||||
Here's an example:
|
||||
|
||||
```python
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain.output_parsers.json import SimpleJsonOutputParser
|
||||
|
||||
model = ChatOpenAI(
|
||||
model="gpt-4o",
|
||||
model_kwargs={ "response_format": { "type": "json_object" } },
|
||||
)
|
||||
|
||||
prompt = ChatPromptTemplate.from_template(
|
||||
"Answer the user's question to the best of your ability."
|
||||
'You must always output a JSON object with an "answer" key and a "followup_question" key.'
|
||||
"{question}"
|
||||
)
|
||||
|
||||
chain = prompt | model | SimpleJsonOutputParser()
|
||||
|
||||
chain.invoke({ "question": "What is the powerhouse of the cell?" })
|
||||
```
|
||||
|
||||
```
|
||||
{'answer': 'The powerhouse of the cell is the mitochondrion. It is responsible for producing energy in the form of ATP through cellular respiration.',
|
||||
'followup_question': 'Would you like to know more about how mitochondria produce energy?'}
|
||||
```
|
||||
|
||||
For a full list of model providers that support JSON mode, see [this table](/docs/integrations/chat/#advanced-features).
|
||||
|
||||
#### Function/tool calling
|
||||
|
||||
:::info
|
||||
We use the term tool calling interchangeably with function calling. Although
|
||||
function calling is sometimes meant to refer to invocations of a single function,
|
||||
we treat all models as though they can return multiple tool or function calls in
|
||||
each message.
|
||||
:::
|
||||
|
||||
Tool calling allows a model to respond to a given prompt by generating output that matches a user-defined schema. For example, if you wanted to extract output matching some schema
from unstructured text, you could give the model an "extraction" tool that takes
parameters matching the desired schema, then treat the generated output as your final
|
||||
result.
|
||||
|
||||
For models that support it, tool calling can be very convenient. It removes the
|
||||
guesswork around how best to prompt schemas in favor of a built-in model feature. It can also
|
||||
more naturally support agentic flows, since you can just pass multiple tool schemas instead
|
||||
of fiddling with enums or unions.
|
||||
|
||||
Many LLM providers, including [Anthropic](https://www.anthropic.com/), [Cohere](https://cohere.com/), [Google](https://cloud.google.com/vertex-ai), and others, support variants of a tool calling feature. The system calling the LLM can
receive the tool call, execute it, and return the output to the LLM to inform its
response. LangChain includes a suite of [built-in tools](/docs/integrations/tools/)
|
||||
and supports several methods for defining your own [custom tools](/docs/how_to/custom_tools).
|
||||
|
||||
LangChain provides a standardized interface for tool calling that is consistent across different models.
|
||||
|
||||
The standard interface consists of:
|
||||
|
||||
* `ChatModel.bind_tools()`: a method for specifying which tools are available for a model to call. This method accepts [LangChain tools](/docs/concepts/#tools) here.
|
||||
* `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.
|
||||
|
||||
The following how-to guides are good practical resources for using function/tool calling:
|
||||
|
||||
- [How to return structured data from an LLM](/docs/how_to/structured_output/)
|
||||
- [How to use a model to call tools](/docs/how_to/tool_calling/)
|
||||
|
||||
For a full list of model providers that support tool calling, [see this table](/docs/integrations/chat/#advanced-features).
|
||||
|
||||
### Retrieval
|
||||
|
||||
LLMs are trained on a large but fixed dataset, limiting their ability to reason over private or recent information. Fine-tuning an LLM with specific facts is one way to mitigate this, but is often [poorly suited for factual recall](https://www.anyscale.com/blog/fine-tuning-is-for-form-not-facts) and [can be costly](https://www.glean.com/blog/how-to-build-an-ai-assistant-for-the-enterprise).
|
||||
Retrieval is the process of providing relevant information to an LLM to improve its response for a given input. Retrieval augmented generation (RAG) is the process of grounding the LLM generation (output) using the retrieved information.
|
||||
|
||||
:::tip
|
||||
|
||||
* See our RAG from Scratch [code](https://github.com/langchain-ai/rag-from-scratch) and [video series](https://youtube.com/playlist?list=PLfaIDFEXuae2LXbO1_PKyVJiQ23ZztA0x&feature=shared).
|
||||
* For a high-level guide on retrieval, see this [tutorial on RAG](/docs/tutorials/rag/).
|
||||
|
||||
:::
|
||||
|
||||
RAG is only as good as the retrieved documents’ relevance and quality. Fortunately, an emerging set of techniques can be employed to design and improve RAG systems. We've focused on taxonomizing and summarizing many of these techniques (see below figure) and will share some high-level strategic guidance in the following sections.
|
||||
You can and should experiment with using different pieces together. You might also find [this LangSmith guide](https://docs.smith.langchain.com/how_to_guides/evaluation/evaluate_llm_application) useful for showing how to evaluate different iterations of your app.
|
||||
|
||||

|
||||
|
||||
#### Query Translation
|
||||
|
||||
First, consider the user input(s) to your RAG system. Ideally, a RAG system can handle a wide range of inputs, from poorly worded questions to complex multi-part queries.
|
||||
**Using an LLM to review and optionally modify the input is the central idea behind query translation.** This serves as a general buffer, optimizing raw user inputs for your retrieval system.
|
||||
For example, this can be as simple as extracting keywords or as complex as generating multiple sub-questions for a complex query.
|
||||
|
||||
| Name | When to use | Description |
|
||||
|---------------|-------------|-------------|
|
||||
| [Multi-query](/docs/how_to/MultiQueryRetriever/) | When you need to cover multiple perspectives of a question. | Rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, return the unique documents for all queries. |
|
||||
| [Decomposition](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a question can be broken down into smaller subproblems. | Decompose a question into a set of subproblems / questions, which can either be solved sequentially (use the answer from first + retrieval to answer the second) or in parallel (consolidate each answer into final answer). |
|
||||
| [Step-back](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | When a higher-level conceptual understanding is required. | First prompt the LLM to ask a generic step-back question about higher-level concepts or principles, and retrieve relevant facts about them. Use this grounding to help answer the user question. |
|
||||
| [HyDE](https://github.com/langchain-ai/rag-from-scratch/blob/main/rag_from_scratch_5_to_9.ipynb) | If you have challenges retrieving relevant documents using the raw user inputs. | Use an LLM to convert questions into hypothetical documents that answer the question. Use the embedded hypothetical documents to retrieve real documents with the premise that doc-doc similarity search can produce more relevant matches. |
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch videos for a few different specific approaches:
|
||||
- [Multi-query](https://youtu.be/JChPi0CRnDY?feature=shared)
|
||||
- [Decomposition](https://youtu.be/h0OPWlEOank?feature=shared)
|
||||
- [Step-back](https://youtu.be/xn1jEjRyJ2U?feature=shared)
|
||||
- [HyDE](https://youtu.be/SaDzIVkYqyY?feature=shared)
|
||||
|
||||
:::
|
||||
|
||||
#### Routing
|
||||
|
||||
Second, consider the data sources available to your RAG system. You may want to query across more than one database or across structured and unstructured data sources. **Using an LLM to review the input and route it to the appropriate data source is a simple and effective approach for querying across sources.**
|
||||
|
||||
| Name | When to use | Description |
|
||||
|------------------|--------------------------------------------|-------------|
|
||||
| [Logical routing](/docs/how_to/routing/) | When you can prompt an LLM with rules to decide where to route the input. | Logical routing can use an LLM to reason about the query and choose which datastore is most appropriate. |
|
||||
| [Semantic routing](/docs/how_to/routing/#routing-by-semantic-similarity) | When semantic similarity is an effective way to determine where to route the input. | Semantic routing embeds both the query and, typically, a set of prompts. It then chooses the appropriate prompt based upon similarity. |
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch video on [routing](https://youtu.be/pfpIndq7Fi8?feature=shared).
|
||||
|
||||
:::
|
||||
|
||||
#### Query Construction
|
||||
|
||||
Third, consider whether any of your data sources require specific query formats. Many structured databases use SQL. Vector stores often have specific syntax for applying keyword filters to document metadata. **Using an LLM to convert a natural language query into a query syntax is a popular and powerful approach.**
|
||||
In particular, [text-to-SQL](/docs/tutorials/sql_qa/), [text-to-Cypher](/docs/tutorials/graph/), and [query analysis for metadata filters](/docs/tutorials/query_analysis/#query-analysis) are useful ways to interact with structured, graph, and vector databases respectively.
|
||||
|
||||
| Name | When to Use | Description |
|
||||
|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Text to SQL](/docs/tutorials/sql_qa/) | If users are asking questions that require information housed in a relational database, accessible via SQL. | This uses an LLM to transform user input into a SQL query. |
|
||||
| [Text-to-Cypher](/docs/tutorials/graph/) | If users are asking questions that require information housed in a graph database, accessible via Cypher. | This uses an LLM to transform user input into a Cypher query. |
|
||||
| [Self Query](/docs/how_to/self_query/) | If users are asking questions that are better answered by fetching documents based on metadata rather than similarity with the text. | This uses an LLM to transform user input into two things: (1) a string to look up semantically, (2) a metadata filter to go along with it. This is useful because oftentimes questions are about the METADATA of documents (not the content itself). |
|
||||
|
||||
:::tip
|
||||
|
||||
See our [blog post overview](https://blog.langchain.dev/query-construction/) and RAG from Scratch video on [query construction](https://youtu.be/kl6NwWYxvbM?feature=shared), the process of text-to-DSL where DSL is a domain specific language required to interact with a given database. This converts user questions into structured queries.
|
||||
|
||||
:::
|
||||
|
||||
#### Indexing
|
||||
|
||||
Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
|
||||
|
||||
Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.
|
||||
|
||||
Two approaches can address this tension: (1) [Multi Vector](/docs/how_to/multi_vector/) retriever using an LLM to translate documents into any form (e.g., often into a summary) that is well-suited for indexing, but returns full documents to the LLM for generation. (2) [ParentDocument](/docs/how_to/parent_document_retriever/) retriever embeds document chunks, but also returns full documents. The idea is to get the best of both worlds: use concise representations (summaries or chunks) for retrieval, but use the full documents for answer generation.
|
||||
|
||||
| Name | Index Type | Uses an LLM | When to Use | Description |
|------|------------|-------------|-------------|-------------|
| [Vector store](/docs/how_to/vectorstore_retriever/) | Vector store | No | If you are just getting started and looking for something quick and easy. | This is the simplest method and the one that is easiest to get started with. It involves creating embeddings for each piece of text. |
| [ParentDocument](/docs/how_to/parent_document_retriever/) | Vector store + Document Store | No | If your pages have lots of smaller pieces of distinct information that are best indexed by themselves, but best retrieved all together. | This involves indexing multiple chunks for each document. Then you find the chunks that are most similar in embedding space, but you retrieve the whole parent document and return that (rather than individual chunks). |
| [Multi Vector](/docs/how_to/multi_vector/) | Vector store + Document Store | Sometimes during indexing | If you are able to extract information from documents that you think is more relevant to index than the text itself. | This involves creating multiple vectors for each document. Each vector could be created in a myriad of ways - examples include summaries of the text and hypothetical questions. |
| [Time-Weighted Vector store](/docs/how_to/time_weighted_vectorstore/) | Vector store | No | If you have timestamps associated with your documents, and you want to retrieve the most recent ones | This fetches documents based on a combination of semantic similarity (as in normal vector retrieval) and recency (looking at timestamps of indexed documents) |
|
||||
|
||||
:::tip
|
||||
|
||||
- See our RAG from Scratch video on [indexing fundamentals](https://youtu.be/bjb_EMsTDKI?feature=shared)
|
||||
- See our RAG from Scratch video on [multi vector retriever](https://youtu.be/gTCU9I6QqCE?feature=shared)
|
||||
|
||||
:::
|
||||
|
||||
Fifth, consider ways to improve the quality of your similarity search itself. Embedding models compress text into fixed-length (vector) representations that capture the semantic content of the document. This compression is useful for search / retrieval, but puts a heavy burden on that single vector representation to capture the semantic nuance / detail of the document. In some cases, irrelevant or redundant content can dilute the semantic usefulness of the embedding.
|
||||
|
||||
[ColBERT](https://docs.google.com/presentation/d/1IRhAdGjIevrrotdplHNcc4aXgIYyKamUKTWtB3m3aMU/edit?usp=sharing) is an interesting approach to address this with a higher granularity embeddings: (1) produce a contextually influenced embedding for each token in the document and query, (2) score similarity between each query token and all document tokens, (3) take the max, (4) do this for all query tokens, and (5) take the sum of the max scores (in step 3) for all query tokens to get a query-document similarity score; this token-wise scoring can yield strong results.
|
||||
|
||||

|
||||
|
||||
There are some additional tricks to improve the quality of your retrieval. Embeddings excel at capturing semantic information, but may struggle with keyword-based queries. Many [vector stores](/docs/integrations/retrievers/pinecone_hybrid_search/) offer built-in [hybrid-search](https://docs.pinecone.io/guides/data/understanding-hybrid-search) to combine keyword and semantic similarity, which marries the benefits of both approaches. Furthermore, many vector stores have [maximal marginal relevance](https://python.langchain.com/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/), which attempts to diversify the results of a search to avoid returning similar and redundant documents.
|
||||
|
||||
| Name | When to use | Description |
|
||||
|-------------------|----------------------------------------------------------|-------------|
|
||||
| [ColBERT](/docs/integrations/providers/ragatouille/#using-colbert-as-a-reranker) | When higher granularity embeddings are needed. | ColBERT uses contextually influenced embeddings for each token in the document and query to get a granular query-document similarity score. |
|
||||
| [Hybrid search](/docs/integrations/retrievers/pinecone_hybrid_search/) | When combining keyword-based and semantic similarity. | Hybrid search combines keyword and semantic similarity, marrying the benefits of both approaches. |
|
||||
| [Maximal Marginal Relevance (MMR)](/docs/integrations/vectorstores/pinecone/#maximal-marginal-relevance-searches) | When needing to diversify search results. | MMR attempts to diversify the results of a search to avoid returning similar and redundant documents. |
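As a quick illustration of the MMR row, most LangChain vector stores expose MMR through `as_retriever`. This hedged sketch reuses the FAISS `vectorstore` from the earlier retriever example:

```python
# Fetch a larger candidate pool (fetch_k), then keep k diverse results.
mmr_retriever = vectorstore.as_retriever(
    search_type="mmr", search_kwargs={"k": 2, "fetch_k": 10}
)
print(mmr_retriever.invoke("What do retrievers do?"))
```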
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch video on [ColBERT](https://youtu.be/cN6S0Ehm7_8?feature=shared).
|
||||
|
||||
:::
|
||||
|
||||
#### Post-processing
|
||||
|
||||
Sixth, consider ways to filter or rank retrieved documents. This is very useful if you are [combining documents returned from multiple sources](/docs/integrations/retrievers/cohere-reranker/#doing-reranking-with-coherererank), since it can down-rank less relevant documents and / or [compress similar documents](/docs/how_to/contextual_compression/#more-built-in-compressors-filters).
|
||||
|
||||
| Name | Index Type | Uses an LLM | When to Use | Description |
|
||||
|---------------------------|------------------------------|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Contextual Compression](/docs/how_to/contextual_compression/) | Any | Sometimes | If you are finding that your retrieved documents contain too much irrelevant information and are distracting the LLM. | This puts a post-processing step on top of another retriever and extracts only the most relevant information from retrieved documents. This can be done with embeddings or an LLM. |
|
||||
| [Ensemble](/docs/how_to/ensemble_retriever/) | Any | No | If you have multiple retrieval methods and want to try combining them. | This fetches documents from multiple retrievers and then combines them. |
|
||||
| [Re-ranking](/docs/integrations/retrievers/cohere-reranker/) | Any | Yes | If you want to rank retrieved documents based upon relevance, especially if you want to combine results from multiple retrieval methods. | Given a query and a list of documents, Rerank indexes the documents from most to least semantically relevant to the query. |
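As one concrete example of the table above, here is a hedged sketch of the Ensemble option that fuses a keyword (BM25) retriever with the vector store retriever from the earlier example; it assumes the `rank_bm25` package is installed and reuses the `texts` and `vectorstore` defined earlier:

```python
from langchain.retrievers import EnsembleRetriever
from langchain_community.retrievers import BM25Retriever

bm25 = BM25Retriever.from_texts(texts)  # keyword-based retriever
ensemble = EnsembleRetriever(
    retrievers=[bm25, vectorstore.as_retriever()], weights=[0.5, 0.5]
)
print(ensemble.invoke("What do retrievers do?"))
```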
|
||||
|
||||
:::tip
|
||||
|
||||
See our RAG from Scratch video on [RAG-Fusion](https://youtu.be/77qELPbNgxA?feature=shared), an approach to post-processing across multiple queries: rewrite the user question from multiple perspectives, retrieve documents for each rewritten question, and combine the ranks of multiple search result lists to produce a single, unified ranking with [Reciprocal Rank Fusion (RRF)](https://towardsdatascience.com/forget-rag-the-future-is-rag-fusion-1147298d8ad1).
|
||||
|
||||
:::
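The fusion step itself is simple to sketch. Below is a minimal, self-contained implementation of Reciprocal Rank Fusion over several ranked result lists (the document ids are placeholders; in RAG-Fusion each list would come from retrieving with one rewritten question):

```python
from collections import defaultdict

def reciprocal_rank_fusion(ranked_lists, k: int = 60):
    """Combine ranked lists of doc ids into one list ordered by fused score."""
    scores = defaultdict(float)
    for ranking in ranked_lists:
        for rank, doc_id in enumerate(ranking):
            scores[doc_id] += 1.0 / (k + rank + 1)
    return sorted(scores, key=scores.get, reverse=True)

print(reciprocal_rank_fusion([["a", "b", "c"], ["a", "c", "d"]]))
# -> ['a', 'c', 'b', 'd']
```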
|
||||
|
||||
#### Generation
|
||||
|
||||
**Finally, consider ways to build self-correction into your RAG system.** RAG systems can suffer from low-quality retrieval (e.g., if a user question is out of the domain for the index) and / or hallucinations in generation. A naive retrieve-generate pipeline has no ability to detect or self-correct these kinds of errors. The concept of ["flow engineering"](https://x.com/karpathy/status/1748043513156272416) has been introduced [in the context of code generation](https://arxiv.org/abs/2401.08500): iteratively build an answer to a code question with unit tests to check and self-correct errors. Several works have applied this to RAG, such as Self-RAG and Corrective-RAG. In both cases, checks for document relevance, hallucinations, and / or answer quality are performed in the RAG answer generation flow.
|
||||
|
||||
We've found that graphs are a great way to reliably express logical flows and have implemented ideas from several of these papers [using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag), as shown in the figure below (red - routing, blue - fallback, green - self-correction):
|
||||
- **Routing:** Adaptive RAG ([paper](https://arxiv.org/abs/2403.14403)). Route questions to different retrieval approaches, as discussed above
|
||||
- **Fallback:** Corrective RAG ([paper](https://arxiv.org/pdf/2401.15884.pdf)). Fall back to web search if docs are not relevant to the query
|
||||
- **Self-correction:** Self-RAG ([paper](https://arxiv.org/abs/2310.11511)). Fix answers that contain hallucinations or don't address the question
|
||||
|
||||

|
||||
|
||||
| Name | When to use | Description |
|
||||
|-------------------|-----------------------------------------------------------|-------------|
|
||||
| Self-RAG | When needing to fix answers with hallucinations or irrelevant content. | Self-RAG performs checks for document relevance, hallucinations, and answer quality during the RAG answer generation flow, iteratively building an answer and self-correcting errors. |
|
||||
| Corrective-RAG | When needing a fallback mechanism for low relevance docs. | Corrective-RAG includes a fallback (e.g., to web search) if the retrieved documents are not relevant to the query, ensuring higher quality and more relevant retrieval. |
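To make the control flow concrete without reproducing the LangGraph implementations linked above, here is a purely conceptual Python sketch; every callable (`grade_docs`, `web_search`, `generate`, `is_grounded`) is a placeholder you would implement with an LLM or a search tool:

```python
def corrective_rag(question, retriever, grade_docs, web_search, generate, is_grounded):
    docs = retriever.invoke(question)
    if not grade_docs(question, docs):      # Corrective-RAG style relevance check
        docs = web_search(question)         # fallback when retrieval misses
    answer = generate(question, docs)
    if not is_grounded(answer, docs):       # Self-RAG style hallucination check
        answer = generate(question, docs)   # retry (or route to another strategy)
    return answer
```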
|
||||
|
||||
:::tip
|
||||
|
||||
See several videos and cookbooks showcasing RAG with LangGraph:
|
||||
- [LangGraph Corrective RAG](https://www.youtube.com/watch?v=E2shqsYwxck)
|
||||
- [LangGraph combining Adaptive, Self-RAG, and Corrective RAG](https://www.youtube.com/watch?v=-ROS6gfYIts)
|
||||
- [Cookbooks for RAG using LangGraph](https://github.com/langchain-ai/langgraph/tree/main/examples/rag)
|
||||
|
||||
See our LangGraph RAG recipes with partners:
|
||||
- [Meta](https://github.com/meta-llama/llama-recipes/tree/main/recipes/use_cases/agents/langchain)
|
||||
- [Mistral](https://github.com/mistralai/cookbook/tree/main/third_party/langchain)
|
||||
|
||||
:::
|
||||
|
||||
### Text splitting
|
||||
|
||||
@@ -550,3 +1062,20 @@ Table columns:
|
||||
| Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. |
|
||||
| Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
|
||||
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) |  | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
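For instance, the Character splitter from the table can be used like this (a minimal sketch; the sample text and sizes are arbitrary):

```python
from langchain_text_splitters import CharacterTextSplitter

splitter = CharacterTextSplitter(separator="\n\n", chunk_size=40, chunk_overlap=0)
chunks = splitter.split_text(
    "First paragraph about LangChain.\n\nSecond paragraph about text splitting."
)
print(chunks)  # two chunks, one per paragraph
```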
|
||||
|
||||
### Evaluation
|
||||
<span data-heading-keywords="evaluation,evaluate"></span>
|
||||
|
||||
Evaluation is the process of assessing the performance and effectiveness of your LLM-powered applications.
|
||||
It involves testing the model's responses against a set of predefined criteria or benchmarks to ensure it meets the desired quality standards and fulfills the intended purpose.
|
||||
This process is vital for building reliable applications.
|
||||
|
||||

|
||||
|
||||
[LangSmith](https://docs.smith.langchain.com/) helps with this process in a few ways:
|
||||
|
||||
- It makes it easier to create and curate datasets via its tracing and annotation features
|
||||
- It provides an evaluation framework that helps you define metrics and run your app against your dataset
|
||||
- It allows you to track results over time and automatically run your evaluators on a schedule or as part of CI/CD
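As a hedged sketch of what running an evaluation looks like with the LangSmith SDK (the dataset name, target function, and evaluator below are illustrative placeholders; a LangSmith API key is required):

```python
from langsmith.evaluation import evaluate

def my_app(inputs: dict) -> dict:
    # Call your chain or agent here; this stub just echoes the question.
    return {"answer": "stub answer for " + inputs["question"]}

def exact_match(run, example) -> dict:
    # Toy evaluator: compare the app's answer to the dataset's reference output.
    return {
        "key": "exact_match",
        "score": run.outputs["answer"] == example.outputs["answer"],
    }

results = evaluate(
    my_app,
    data="my-example-dataset",  # name of a dataset you've created in LangSmith
    evaluators=[exact_match],
)
```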
|
||||
|
||||
To learn more, check out [this LangSmith guide](https://docs.smith.langchain.com/concepts/evaluation).
|
||||
|
||||
@@ -206,9 +206,7 @@ ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogy
|
||||
|
||||
`langchain-core` and partner packages **do not use** optional dependencies in this way.
|
||||
|
||||
You only need to add a new dependency if a **unit test** relies on the package.
|
||||
If your package is only required for **integration tests**, then you can skip these
|
||||
steps and leave all pyproject.toml and poetry.lock files alone.
|
||||
You'll notice that `pyproject.toml` and `poetry.lock` are **not** touched when you add optional dependencies below.
|
||||
|
||||
If you're adding a new dependency to LangChain, assume that it will be an optional dependency, and
|
||||
that most users won't have it installed.
|
||||
@@ -216,20 +214,12 @@ that most users won't have it installed.
|
||||
Users who do not have the dependency installed should be able to **import** your code without
|
||||
any side effects (no warnings, no errors, no exceptions).
|
||||
|
||||
To introduce the dependency to the pyproject.toml file correctly, please do the following:
|
||||
To introduce the dependency to a library, please do the following:
|
||||
|
||||
1. Add the dependency to the main group as an optional dependency
|
||||
```bash
|
||||
poetry add --optional [package_name]
|
||||
```
|
||||
2. Open pyproject.toml and add the dependency to the `extended_testing` extra
|
||||
3. Relock the poetry file to update the extra.
|
||||
```bash
|
||||
poetry lock --no-update
|
||||
```
|
||||
4. Add a unit test that the very least attempts to import the new code. Ideally, the unit
|
||||
1. Open extended_testing_deps.txt and add the dependency
|
||||
2. Add a unit test that at the very least attempts to import the new code. Ideally, the unit
|
||||
test makes use of lightweight fixtures to test the logic of the code.
|
||||
5. Please use the `@pytest.mark.requires(package_name)` decorator for any tests that require the dependency.
|
||||
3. Please use the `@pytest.mark.requires(package_name)` decorator for any unit tests that require the dependency.
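For example (the package name below is hypothetical), a guarded unit test looks like this:

```python
import pytest

@pytest.mark.requires("some_optional_package")  # hypothetical optional dependency
def test_optional_dependency_import() -> None:
    import some_optional_package  # only runs when the extra is installed

    assert some_optional_package is not None
```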
|
||||
|
||||
## Adding a Jupyter Notebook
|
||||
|
||||
|
||||
@@ -55,7 +55,7 @@ The below sections are listed roughly in order of increasing level of abstractio
|
||||
|
||||
### Expression Language
|
||||
|
||||
[LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language) is the fundamental way that most LangChain components fit together, and this section is designed to teach
|
||||
[LangChain Expression Language (LCEL)](/docs/concepts#langchain-expression-language-lcel) is the fundamental way that most LangChain components fit together, and this section is designed to teach
|
||||
developers how to use it to build with LangChain's primitives effectively.
|
||||
|
||||
This section should contain **Tutorials** that teach how to stream and use LCEL primitives for more abstract tasks, **Explanations** of specific behaviors,
|
||||
@@ -88,7 +88,7 @@ Concepts covered in `Integrations` should generally exist in `langchain_communit
|
||||
|
||||
### Guides and Ecosystem
|
||||
|
||||
The [Guides](/docs/tutorials) and [Ecosystem](/docs/langsmith/) sections should contain guides that address higher-level problems than the sections above.
|
||||
The [Guides](/docs/tutorials) and [Ecosystem](https://docs.smith.langchain.com/) sections should contain guides that address higher-level problems than the sections above.
|
||||
This includes, but is not limited to, considerations around productionization and development workflows.
|
||||
|
||||
These should contain mostly **How-to guides**, **Explanations**, and **Tutorials**.
|
||||
|
||||
@@ -71,6 +71,8 @@ make docs_clean
|
||||
make api_docs_clean
|
||||
```
|
||||
|
||||
|
||||
|
||||
Next, you can build the documentation as outlined below:
|
||||
|
||||
```bash
|
||||
@@ -78,6 +80,18 @@ make docs_build
|
||||
make api_docs_build
|
||||
```
|
||||
|
||||
:::tip
|
||||
|
||||
The `make api_docs_build` command takes a long time. If you're making cosmetic changes to the API docs and want to see how they look, use:
|
||||
|
||||
```bash
|
||||
make api_docs_quick_preview
|
||||
```
|
||||
|
||||
which will just build a small subset of the API reference.
|
||||
|
||||
:::
|
||||
|
||||
Finally, run the link checker to ensure all links are valid:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -48,7 +48,7 @@ In a similar vein, we do enforce certain linting, formatting, and documentation
|
||||
If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help -
|
||||
we do not want these to get in the way of getting good code into the codebase.
|
||||
|
||||
# 🌟 Recognition
|
||||
### 🌟 Recognition
|
||||
|
||||
If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
|
||||
If you have a Twitter account you would like us to mention, please let us know in the PR or through another means.
|
||||
If you have a Twitter account you would like us to mention, please let us know in the PR or through another means.
|
||||
|
||||
@@ -15,12 +15,22 @@ Here's the structure visualized as a tree:
|
||||
├── cookbook # Tutorials and examples
|
||||
├── docs # Contains content for the documentation here: https://python.langchain.com/
|
||||
├── libs
|
||||
│ ├── langchain # Main package
|
||||
│ ├── langchain
|
||||
│ │ ├── langchain
|
||||
│ │ ├── tests/unit_tests # Unit tests (present in each package not shown for brevity)
|
||||
│ │ ├── tests/integration_tests # Integration tests (present in each package not shown for brevity)
|
||||
│ ├── langchain-community # Third-party integrations
|
||||
│ ├── langchain-core # Base interfaces for key abstractions
|
||||
│ ├── langchain-experimental # Experimental components and chains
|
||||
│ ├── community # Third-party integrations
|
||||
│ │ ├── langchain-community
|
||||
│ ├── core # Base interfaces for key abstractions
|
||||
│ │ ├── langchain-core
|
||||
│ ├── experimental # Experimental components and chains
|
||||
│ │ ├── langchain-experimental
|
||||
| ├── cli # Command line interface
|
||||
│ │ ├── langchain-cli
|
||||
│ ├── text-splitters
|
||||
│ │ ├── langchain-text-splitters
|
||||
│ ├── standard-tests
|
||||
│ │ ├── langchain-standard-tests
|
||||
│ ├── partners
|
||||
│ ├── langchain-partner-1
|
||||
│ ├── langchain-partner-2
|
||||
|
||||
BIN
docs/docs/example_data/nke-10k-2023.pdf
Normal file
Binary file not shown.
@@ -132,7 +132,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"html_string = \"\"\"\n",
|
||||
" <!DOCTYPE html>\n",
|
||||
|
||||
@@ -138,20 +138,10 @@
|
||||
"execution_count": 5,
|
||||
"id": "d9afb0ca",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/chestercurme/.pyenv/versions/3.10.4/envs/sandbox310/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 0.3.0. Use RunnableSequence, e.g., `prompt | llm` instead.\n",
|
||||
" warn_deprecated(\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain_core.output_parsers import BaseOutputParser\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
@@ -180,7 +170,7 @@
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"# Chain\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)\n",
|
||||
"llm_chain = QUERY_PROMPT | llm | output_parser\n",
|
||||
"\n",
|
||||
"# Other inputs\n",
|
||||
"question = \"What are the approaches to Task Decomposition?\""
|
||||
@@ -189,14 +179,14 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2eca2d96-8057-4ed9-873d-fa1064c09acf",
|
||||
"id": "59c75c56-dbd7-4887-b9ba-0b5b21069f51",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression analysis?', '4. What are the teachings of the course regarding regression?', '5. In what manner is regression covered in the course curriculum?']\n"
|
||||
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression?', '4. In what way is regression covered in the course?', '5. What are the teachings of the course regarding regression?']\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -15,18 +15,18 @@
|
||||
"id": "f4c03f40-1328-412d-8a48-1db0cd481b77",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Build an Agent\n",
|
||||
"# Build an Agent with AgentExecutor (Legacy)\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"This section will cover building with the legacy LangChain AgentExecutor. These are fine for getting started, but past a certain point, you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph Agents](/docs/concepts/#langgraph) or the [migration guide](/docs/how_to/migrate_agent/)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"By themselves, language models can't take actions - they just output text.\n",
|
||||
"A big use case for LangChain is creating **agents**.\n",
|
||||
"Agents are systems that use an LLM as a reasoning enginer to determine which actions to take and what the inputs to those actions should be.\n",
|
||||
"The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish.\n",
|
||||
"Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.\n",
|
||||
"The results of those actions can then be fed back into the agent and it determines whether more actions are needed, or whether it is okay to finish.\n",
|
||||
"\n",
|
||||
"In this tutorial we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"This section will cover building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd reccommend checking out [LangGraph](/docs/concepts/#langgraph)\n",
|
||||
":::\n",
|
||||
"In this tutorial, we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n",
|
||||
"\n",
|
||||
"## Concepts\n",
|
||||
"\n",
|
||||
@@ -34,7 +34,7 @@
|
||||
"- Using [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n",
|
||||
"- Creating a [Retriever](/docs/concepts/#retrievers) to expose specific information to our agent\n",
|
||||
"- Using a Search [Tool](/docs/concepts/#tools) to look up things online\n",
|
||||
"- [`Chat History`](/docs/concepts/#chat-history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to followup questions. \n",
|
||||
"- [`Chat History`](/docs/concepts/#chat-history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to follow-up questions. \n",
|
||||
"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"id": "711752cb-4f15-42a3-9838-a0c67f397771",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to attach runtime arguments to a Runnable\n",
|
||||
"# How to add default invocation args to a Runnable\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
|
||||
179
docs/docs/how_to/callbacks_async.ipynb
Normal file
@@ -0,0 +1,179 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use callbacks in async environments\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Callbacks](/docs/concepts/#callbacks)\n",
|
||||
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
":::{.callout-warning}\n",
|
||||
"If you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-danger}\n",
|
||||
"\n",
|
||||
"If you're on `python<=3.10`, you need to remember to propagate `config` or `callbacks` when invoking other `runnable` from within a `RunnableLambda`, `RunnableGenerator` or `@tool`. If you do not do this,\n",
|
||||
"the callbacks will not be propagated to the child runnables being invoked.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_anthropic\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"zzzz....\n",
|
||||
"Hi! I just woke up. Your llm is starting\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: Here\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: a\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: little\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: joke\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: for\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: you\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: :\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: \n",
|
||||
"\n",
|
||||
"Why\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: can\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: 't\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: a\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: bicycle\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: stan\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: d up\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: by\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: itself\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: ?\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: Because\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: it\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: two\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: -\n",
|
||||
"Sync handler being called in a `thread_pool_executor`: token: tire\n",
|
||||
"zzzz....\n",
|
||||
"Hi! I just woke up. Your llm is ending\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"LLMResult(generations=[[ChatGeneration(text=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", message=AIMessage(content=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", id='run-8afc89e8-02c0-4522-8480-d96977240bd4-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('8afc89e8-02c0-4522-8480-d96977240bd4'))])"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import asyncio\n",
|
||||
"from typing import Any, Dict, List\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.outputs import LLMResult\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyCustomSyncHandler(BaseCallbackHandler):\n",
|
||||
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
|
||||
" print(f\"Sync handler being called in a `thread_pool_executor`: token: {token}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyCustomAsyncHandler(AsyncCallbackHandler):\n",
|
||||
" \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
|
||||
"\n",
|
||||
" async def on_llm_start(\n",
|
||||
" self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
|
||||
" ) -> None:\n",
|
||||
" \"\"\"Run when chain starts running.\"\"\"\n",
|
||||
" print(\"zzzz....\")\n",
|
||||
" await asyncio.sleep(0.3)\n",
|
||||
" class_name = serialized[\"name\"]\n",
|
||||
" print(\"Hi! I just woke up. Your llm is starting\")\n",
|
||||
"\n",
|
||||
" async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
|
||||
" \"\"\"Run when chain ends running.\"\"\"\n",
|
||||
" print(\"zzzz....\")\n",
|
||||
" await asyncio.sleep(0.3)\n",
|
||||
" print(\"Hi! I just woke up. Your llm is ending\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
|
||||
"# Additionally, we pass in a list with our custom handler\n",
|
||||
"chat = ChatAnthropic(\n",
|
||||
" model=\"claude-3-sonnet-20240229\",\n",
|
||||
" max_tokens=25,\n",
|
||||
" streaming=True,\n",
|
||||
" callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"await chat.agenerate([[HumanMessage(content=\"Tell me a joke\")]])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now learned how to create your own custom callback handlers.\n",
|
||||
"\n",
|
||||
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
149
docs/docs/how_to/callbacks_attach.ipynb
Normal file
@@ -0,0 +1,149 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to attach callbacks to a runnable\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Callbacks](/docs/concepts/#callbacks)\n",
|
||||
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
|
||||
"- [Chaining runnables](/docs/how_to/sequence)\n",
|
||||
"- [Attach runtime arguments to a Runnable](/docs/how_to/binding)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
|
||||
"\n",
|
||||
":::{.callout-important}\n",
|
||||
"\n",
|
||||
"`with_config()` binds a configuration which will be interpreted as **runtime** configuration. So these callbacks will propagate to all child components.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_anthropic\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Chain RunnableSequence started\n",
|
||||
"Chain ChatPromptTemplate started\n",
|
||||
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
|
||||
"Chat model started\n",
|
||||
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'))]] llm_output={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
|
||||
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.callbacks import BaseCallbackHandler\n",
|
||||
"from langchain_core.messages import BaseMessage\n",
|
||||
"from langchain_core.outputs import LLMResult\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class LoggingHandler(BaseCallbackHandler):\n",
|
||||
" def on_chat_model_start(\n",
|
||||
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
|
||||
" ) -> None:\n",
|
||||
" print(\"Chat model started\")\n",
|
||||
"\n",
|
||||
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
|
||||
" print(f\"Chat model ended, response: {response}\")\n",
|
||||
"\n",
|
||||
" def on_chain_start(\n",
|
||||
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
|
||||
" ) -> None:\n",
|
||||
" print(f\"Chain {serialized.get('name')} started\")\n",
|
||||
"\n",
|
||||
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
|
||||
" print(f\"Chain ended, outputs: {outputs}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"callbacks = [LoggingHandler()]\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"\n",
|
||||
"chain_with_callbacks = chain.with_config(callbacks=callbacks)\n",
|
||||
"\n",
|
||||
"chain_with_callbacks.invoke({\"number\": \"2\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The bound callbacks will run for all nested module runs.\n",
|
||||
"\n",
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now learned how to attach callbacks to a chain.\n",
|
||||
"\n",
|
||||
"Next, check out the other how-to guides in this section, such as how to [pass callbacks in at runtime](/docs/how_to/callbacks_runtime)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
141
docs/docs/how_to/callbacks_constructor.ipynb
Normal file
@@ -0,0 +1,141 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to propagate callbacks constructor\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Callbacks](/docs/concepts/#callbacks)\n",
|
||||
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Most LangChain modules allow you to pass `callbacks` directly into the constructor (i.e., initializer). In this case, the callbacks will only be called for that instance (and any nested runs).\n",
|
||||
"\n",
|
||||
":::{.callout-warning}\n",
|
||||
"Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children of the object. This can lead to confusing behavior,\n",
|
||||
"and it's generally better to pass callbacks as a run time argument.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_anthropic\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Chat model started\n",
|
||||
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0'))]] llm_output={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.callbacks import BaseCallbackHandler\n",
|
||||
"from langchain_core.messages import BaseMessage\n",
|
||||
"from langchain_core.outputs import LLMResult\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class LoggingHandler(BaseCallbackHandler):\n",
|
||||
" def on_chat_model_start(\n",
|
||||
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
|
||||
" ) -> None:\n",
|
||||
" print(\"Chat model started\")\n",
|
||||
"\n",
|
||||
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
|
||||
" print(f\"Chat model ended, response: {response}\")\n",
|
||||
"\n",
|
||||
" def on_chain_start(\n",
|
||||
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
|
||||
" ) -> None:\n",
|
||||
" print(f\"Chain {serialized.get('name')} started\")\n",
|
||||
"\n",
|
||||
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
|
||||
" print(f\"Chain ended, outputs: {outputs}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"callbacks = [LoggingHandler()]\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", callbacks=callbacks)\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"\n",
|
||||
"chain.invoke({\"number\": \"2\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can see that we only see events from the chat model run - no chain events from the prompt or broader chain.\n",
|
||||
"\n",
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now learned how to pass callbacks into a constructor.\n",
|
||||
"\n",
|
||||
"Next, check out the other how-to guides in this section, such as how to [pass callbacks at runtime](/docs/how_to/callbacks_runtime)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
140
docs/docs/how_to/callbacks_runtime.ipynb
Normal file
@@ -0,0 +1,140 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass callbacks in at runtime\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Callbacks](/docs/concepts/#callbacks)\n",
|
||||
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
|
||||
"\n",
|
||||
"This prevents us from having to manually attach the handlers to each individual nested object. Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_anthropic\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Chain RunnableSequence started\n",
|
||||
"Chain ChatPromptTemplate started\n",
|
||||
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
|
||||
"Chat model started\n",
|
||||
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'))]] llm_output={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
|
||||
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Any, Dict, List\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.callbacks import BaseCallbackHandler\n",
|
||||
"from langchain_core.messages import BaseMessage\n",
|
||||
"from langchain_core.outputs import LLMResult\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class LoggingHandler(BaseCallbackHandler):\n",
|
||||
" def on_chat_model_start(\n",
|
||||
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
|
||||
" ) -> None:\n",
|
||||
" print(\"Chat model started\")\n",
|
||||
"\n",
|
||||
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
|
||||
" print(f\"Chat model ended, response: {response}\")\n",
|
||||
"\n",
|
||||
" def on_chain_start(\n",
|
||||
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
|
||||
" ) -> None:\n",
|
||||
" print(f\"Chain {serialized.get('name')} started\")\n",
|
||||
"\n",
|
||||
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
|
||||
" print(f\"Chain ended, outputs: {outputs}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"callbacks = [LoggingHandler()]\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"\n",
|
||||
"chain.invoke({\"number\": \"2\"}, config={\"callbacks\": callbacks})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime.\n",
|
||||
"\n",
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now learned how to pass callbacks at runtime.\n",
|
||||
"\n",
|
||||
"Next, check out the other how-to guides in this section, such as how to [pass callbacks into a module constructor](/docs/how_to/custom_callbacks)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,5 +1,19 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "f781411d",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [charactertextsplitter]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3ee8d00",
|
||||
|
||||
@@ -170,7 +170,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# We can do the same thing with a SQLite cache\n",
|
||||
"from langchain.cache import SQLiteCache\n",
|
||||
"from langchain_community.cache import SQLiteCache\n",
|
||||
"\n",
|
||||
"set_llm_cache(SQLiteCache(database_path=\".langchain.db\"))"
|
||||
]
|
||||
|
||||
157
docs/docs/how_to/chat_models_universal_init.ipynb
Normal file
@@ -0,0 +1,157 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cfdf4f09-8125-4ed1-8063-6feed57da8a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to init any model in one line\n",
|
||||
"\n",
|
||||
"Many LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different ChatModels based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.\n",
|
||||
"\n",
|
||||
":::tip Supported models\n",
|
||||
"\n",
|
||||
"See the [init_chat_model()](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n",
|
||||
"\n",
|
||||
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "165b0de6-9ae3-4e3d-aa98-4fc8a97c4a06",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea2c9f57-a796-45f8-b6f4-3efd3f361a9b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "79e14913-803c-4382-9009-5c6af3d75d35",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?\n",
|
||||
"\n",
|
||||
"Claude Opus: My name is Claude. It's nice to meet you!\n",
|
||||
"\n",
|
||||
"Gemini 1.5: I am a large language model, trained by Google. I do not have a name. \n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"\n",
|
||||
"# Returns a langchain_openai.ChatOpenAI instance.\n",
|
||||
"gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n",
|
||||
"# Returns a langchain_anthropic.ChatAnthropic instance.\n",
|
||||
"claude_opus = init_chat_model(\n",
|
||||
" \"claude-3-opus-20240229\", model_provider=\"anthropic\", temperature=0\n",
|
||||
")\n",
|
||||
"# Returns a langchain_google_vertexai.ChatVertexAI instance.\n",
|
||||
"gemini_15 = init_chat_model(\n",
|
||||
" \"gemini-1.5-pro\", model_provider=\"google_vertexai\", temperature=0\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Since all model integrations implement the ChatModel interface, you can use them in the same way.\n",
|
||||
"print(\"GPT-4o: \" + gpt_4o.invoke(\"what's your name\").content + \"\\n\")\n",
|
||||
"print(\"Claude Opus: \" + claude_opus.invoke(\"what's your name\").content + \"\\n\")\n",
|
||||
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fff9a4c8-b6ee-4a1a-8d3d-0ecaa312d4ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Simple config example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "75c25d39-bf47-4b51-a6c6-64d9c572bfd6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_config = {\n",
|
||||
" \"model\": \"...user-specified...\",\n",
|
||||
" \"model_provider\": \"...user-specified...\",\n",
|
||||
" \"temperature\": 0,\n",
|
||||
" \"max_tokens\": 1000,\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = init_chat_model(**user_config)\n",
|
||||
"llm.invoke(\"what's your name\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f811f219-5e78-4b62-b495-915d52a22532",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Inferring model provider\n",
|
||||
"\n",
|
||||
"For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://api.python.langchain.com/en/latest/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
|
||||
"claude_opus = init_chat_model(\"claude-3-opus-20240229\", temperature=0)\n",
|
||||
"gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "da07b5c0-d2e6-42e4-bfcd-2efcfaae6221",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -14,35 +14,51 @@
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls."
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
|
||||
"\n",
|
||||
"This guide requires `langchain-openai >= 0.1.8`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9c7d1338-dd1b-4d06-b33d-d5cffc49fd6a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1a55e87a-3291-4e7f-8e8e-4c69b0854384",
|
||||
"id": "598ae1e2-a52d-4459-81fd-cdc68b06742a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using AIMessage.response_metadata\n",
|
||||
"## Using LangSmith\n",
|
||||
"\n",
|
||||
"A number of model providers return token usage information as part of the chat generation response. When available, this is included in the [`AIMessage.response_metadata`](/docs/how_to/response_metadata) field. Here's an example with OpenAI:"
|
||||
"You can use [LangSmith](https://www.langchain.com/langsmith) to help track token usage in your LLM application. See the [LangSmith quick start guide](https://docs.smith.langchain.com/).\n",
|
||||
"\n",
|
||||
"## Using AIMessage.usage_metadata\n",
|
||||
"\n",
|
||||
"A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model.\n",
|
||||
"\n",
|
||||
"LangChain `AIMessage` objects include a [usage_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`).\n",
|
||||
"\n",
|
||||
"Examples:\n",
|
||||
"\n",
|
||||
"**OpenAI**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "467ccdeb-6b62-45e5-816e-167cd24d2586",
|
||||
"id": "b39bf807-4125-4db4-bbf7-28a46afff6b4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'token_usage': {'completion_tokens': 225,\n",
|
||||
" 'prompt_tokens': 17,\n",
|
||||
" 'total_tokens': 242},\n",
|
||||
" 'model_name': 'gpt-4-turbo',\n",
|
||||
" 'system_fingerprint': 'fp_76f018034d',\n",
|
||||
" 'finish_reason': 'stop',\n",
|
||||
" 'logprobs': None}"
|
||||
"{'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
@@ -51,37 +67,33 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# !pip install -qU langchain-openai\n",
|
||||
"# # !pip install -qU langchain-openai\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4-turbo\")\n",
|
||||
"msg = llm.invoke([(\"human\", \"What's the oldest known example of cuneiform\")])\n",
|
||||
"msg.response_metadata"
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
|
||||
"openai_response = llm.invoke(\"hello\")\n",
|
||||
"openai_response.usage_metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9d5026e9-3ad4-41e6-9946-9f1a26f4a21f",
|
||||
"id": "2299c44a-2fe6-4d52-a6a2-99ff6d231c73",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And here's an example with Anthropic:"
|
||||
"**Anthropic**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "145404f1-e088-4824-b468-236c486a9903",
|
||||
"id": "9c82ff80-ec4e-4049-b019-5f0bbd7df82a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'id': 'msg_01P61rdHbapEo6h3fjpfpCQT',\n",
|
||||
" 'model': 'claude-3-sonnet-20240229',\n",
|
||||
" 'stop_reason': 'end_turn',\n",
|
||||
" 'stop_sequence': None,\n",
|
||||
" 'usage': {'input_tokens': 17, 'output_tokens': 306}}"
|
||||
"{'input_tokens': 8, 'output_tokens': 12, 'total_tokens': 20}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
@@ -94,9 +106,222 @@
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
|
||||
"msg = llm.invoke([(\"human\", \"What's the oldest known example of cuneiform\")])\n",
|
||||
"msg.response_metadata"
|
||||
"llm = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
|
||||
"anthropic_response = llm.invoke(\"hello\")\n",
|
||||
"anthropic_response.usage_metadata"
|
||||
]
|
||||
},
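Since `usage_metadata` uses the same standard keys across providers, totals can be accumulated over several calls. A minimal sketch, assuming the `llm` objects above (the prompt strings are illustrative):

```python
# Sketch: sum the standardized usage_metadata keys across multiple invocations.
# Assumes `llm` is any chat model that populates usage_metadata,
# e.g. the ChatOpenAI or ChatAnthropic instances shown above.
totals = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
for prompt in ["hello", "tell me a joke"]:
    msg = llm.invoke(prompt)
    usage = msg.usage_metadata or {}
    for key in totals:
        totals[key] += usage.get(key, 0)
print(totals)
```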
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d4efc15-ba9f-4b3d-9278-8e01f99f263f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using AIMessage.response_metadata\n",
|
||||
"\n",
|
||||
"Metadata from the model response is also included in the AIMessage [response_metadata](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "f156f9da-21f2-4c81-a714-54cbf9ad393e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI: {'completion_tokens': 9, 'prompt_tokens': 8, 'total_tokens': 17}\n",
|
||||
"\n",
|
||||
"Anthropic: {'input_tokens': 8, 'output_tokens': 12}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(f'OpenAI: {openai_response.response_metadata[\"token_usage\"]}\\n')\n",
|
||||
"print(f'Anthropic: {anthropic_response.response_metadata[\"usage\"]}')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4ef2c43-0ff6-49eb-9782-e4070c9da8d7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"Some providers support token count metadata in a streaming context.\n",
|
||||
"\n",
|
||||
"#### OpenAI\n",
|
||||
"\n",
|
||||
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={\"include_usage\": True}`.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
"By default, the last message chunk in a stream will include a `\"finish_reason\"` in the message's `response_metadata` attribute. If we include token usage in streaming mode, an additional chunk containing usage metadata will be added to the end of the stream, such that `\"finish_reason\"` appears on the second to last message chunk.\n",
|
||||
":::\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "07f0c872-6b6c-4fed-a129-9b5a858505be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
|
||||
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
|
||||
"\n",
|
||||
"aggregate = None\n",
|
||||
"for chunk in llm.stream(\"hello\", stream_options={\"include_usage\": True}):\n",
|
||||
" print(chunk)\n",
|
||||
" aggregate = chunk if aggregate is None else aggregate + chunk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd809ded-8b13-4d5f-be5e-277b79d51802",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the usage metadata will be included in the sum of the individual message chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "3db7bc03-a7d4-4704-92ab-f8ba92ef59ae",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! How can I assist you today?\n",
|
||||
"{'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(aggregate.content)\n",
|
||||
"print(aggregate.usage_metadata)"
|
||||
]
|
||||
},
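The same aggregation pattern works with the async streaming API; a minimal sketch, assuming the `ChatOpenAI` instance above and `langchain-openai >= 0.1.8`:

```python
# Sketch: aggregate usage metadata from an async stream.
import asyncio


async def stream_with_usage() -> None:
    aggregate = None
    async for chunk in llm.astream("hello", stream_options={"include_usage": True}):
        aggregate = chunk if aggregate is None else aggregate + chunk
    print(aggregate.content)
    print(aggregate.usage_metadata)


asyncio.run(stream_with_usage())  # in a notebook, use `await stream_with_usage()` instead
```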
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7dba63e8-0ed7-4533-8f0f-78e19c38a25c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To disable streaming token counts for OpenAI, set `\"include_usage\"` to False in `stream_options`, or omit it from the parameters:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "67117f2b-ce68-4c1e-9556-2d3849f90e1b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
|
||||
"content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"aggregate = None\n",
|
||||
"for chunk in llm.stream(\"hello\"):\n",
|
||||
" print(chunk)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6a5d9617-be3a-419a-9276-de9c29fa50ae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
|
||||
"\n",
|
||||
"See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "57dec1fb-bd9c-4c98-8798-8fbbe67f6b2c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Token usage: {'input_tokens': 79, 'output_tokens': 23, 'total_tokens': 102}\n",
|
||||
"\n",
|
||||
"setup='Why was the math book sad?' punchline='Because it had too many problems.'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Joke(BaseModel):\n",
|
||||
" \"\"\"Joke to tell user.\"\"\"\n",
|
||||
"\n",
|
||||
" setup: str = Field(description=\"question to set up a joke\")\n",
|
||||
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"gpt-3.5-turbo-0125\",\n",
|
||||
" model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
|
||||
")\n",
|
||||
"# Under the hood, .with_structured_output binds tools to the\n",
|
||||
"# chat model and appends a parser.\n",
|
||||
"structured_llm = llm.with_structured_output(Joke)\n",
|
||||
"\n",
|
||||
"async for event in structured_llm.astream_events(\"Tell me a joke\", version=\"v2\"):\n",
|
||||
" if event[\"event\"] == \"on_chat_model_end\":\n",
|
||||
" print(f'Token usage: {event[\"data\"][\"output\"].usage_metadata}\\n')\n",
|
||||
" elif event[\"event\"] == \"on_chain_end\":\n",
|
||||
" print(event[\"data\"][\"output\"])\n",
|
||||
" else:\n",
|
||||
" pass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bc8d313-4bef-463e-89a5-236d8bb6ab2f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Token usage is also visible in the corresponding [LangSmith trace](https://smith.langchain.com/public/fe6513d5-7212-4045-82e0-fefa28bc7656/r) in the payload from the chat model."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -115,7 +340,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 9,
|
||||
"id": "31667d54",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -123,11 +348,11 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokens Used: 26\n",
|
||||
"Tokens Used: 27\n",
|
||||
"\tPrompt Tokens: 11\n",
|
||||
"\tCompletion Tokens: 15\n",
|
||||
"\tCompletion Tokens: 16\n",
|
||||
"Successful Requests: 1\n",
|
||||
"Total Cost (USD): $0.00056\n"
|
||||
"Total Cost (USD): $2.95e-05\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -136,7 +361,7 @@
|
||||
"\n",
|
||||
"from langchain_community.callbacks.manager import get_openai_callback\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4-turbo\", temperature=0)\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
@@ -153,7 +378,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 10,
|
||||
"id": "e09420f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -161,7 +386,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"52\n"
|
||||
"55\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -172,6 +397,39 @@
|
||||
" print(cb.total_tokens)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ac51188-c8f4-4230-90fd-3cd78cdd955d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
"Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available:\n",
|
||||
":::\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "b241069a-265d-4497-af34-b0a5f95ae67f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"28\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" for chunk in llm.stream(\"Tell me a joke\", stream_options={\"include_usage\": True}):\n",
|
||||
" pass\n",
|
||||
" print(cb.total_tokens)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d8186e7b",
|
||||
@@ -182,7 +440,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 12,
|
||||
"id": "5d1125c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -211,15 +469,15 @@
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
":::note\n",
|
||||
"We have to set `stream_runnable=False` for token counting to work. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events. However, OpenAI does not return token counts when streaming model responses, so we need to turn off the underlying streaming.\n",
|
||||
"We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events.\n",
|
||||
":::\n",
|
||||
"```"
|
||||
]
|
||||
},
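The notebook's agent-construction cell sits outside this hunk; the sketch below illustrates where `stream_runnable=False` is passed, using a placeholder tool and question rather than the notebook's Wikipedia tool:

```python
# Sketch only: shows AgentExecutor(stream_runnable=False) combined with the
# OpenAI callback; the tool, prompt, and question are placeholders.
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_community.callbacks.manager import get_openai_callback
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def word_count(text: str) -> int:
    """Count the words in a piece of text."""
    return len(text.split())


prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
agent = create_tool_calling_agent(llm, [word_count], prompt)
# stream_runnable=False disables internal streaming so that token counts
# (and therefore cost) are visible to the callback.
agent_executor = AgentExecutor(agent=agent, tools=[word_count], stream_runnable=False)

with get_openai_callback() as cb:
    agent_executor.invoke({"input": "How many words are in 'hello world'?"})
    print(cb.total_tokens, cb.total_cost)
```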
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "2f98c536",
|
||||
"execution_count": 13,
|
||||
"id": "3950d88b-8bfb-4294-b75b-e6fd421e633c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -230,46 +488,51 @@
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `wikipedia` with `Hummingbird`\n",
|
||||
"Invoking: `wikipedia` with `{'query': 'hummingbird scientific name'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: Hummingbird\n",
|
||||
"Summary: Hummingbirds are birds native to the Americas and comprise the biological family Trochilidae. With approximately 366 species and 113 genera, they occur from Alaska to Tierra del Fuego, but most species are found in Central and South America. As of 2024, 21 hummingbird species are listed as endangered or critically endangered, with numerous species declining in population.Hummingbirds have varied specialized characteristics to enable rapid, maneuverable flight: exceptional metabolic capacity, adaptations to high altitude, sensitive visual and communication abilities, and long-distance migration in some species. Among all birds, male hummingbirds have the widest diversity of plumage color, particularly in blues, greens, and purples. Hummingbirds are the smallest mature birds, measuring 7.5–13 cm (3–5 in) in length. The smallest is the 5 cm (2.0 in) bee hummingbird, which weighs less than 2.0 g (0.07 oz), and the largest is the 23 cm (9 in) giant hummingbird, weighing 18–24 grams (0.63–0.85 oz). Noted for long beaks, hummingbirds are specialized for feeding on flower nectar, but all species also consume small insects.\n",
|
||||
"Summary: Hummingbirds are birds native to the Americas and comprise the biological family Trochilidae. With approximately 366 species and 113 genera, they occur from Alaska to Tierra del Fuego, but most species are found in Central and South America. As of 2024, 21 hummingbird species are listed as endangered or critically endangered, with numerous species declining in population.\n",
|
||||
"Hummingbirds have varied specialized characteristics to enable rapid, maneuverable flight: exceptional metabolic capacity, adaptations to high altitude, sensitive visual and communication abilities, and long-distance migration in some species. Among all birds, male hummingbirds have the widest diversity of plumage color, particularly in blues, greens, and purples. Hummingbirds are the smallest mature birds, measuring 7.5–13 cm (3–5 in) in length. The smallest is the 5 cm (2.0 in) bee hummingbird, which weighs less than 2.0 g (0.07 oz), and the largest is the 23 cm (9 in) giant hummingbird, weighing 18–24 grams (0.63–0.85 oz). Noted for long beaks, hummingbirds are specialized for feeding on flower nectar, but all species also consume small insects.\n",
|
||||
"They are known as hummingbirds because of the humming sound created by their beating wings, which flap at high frequencies audible to other birds and humans. They hover at rapid wing-flapping rates, which vary from around 12 beats per second in the largest species to 80 per second in small hummingbirds.\n",
|
||||
"Hummingbirds have the highest mass-specific metabolic rate of any homeothermic animal. To conserve energy when food is scarce and at night when not foraging, they can enter torpor, a state similar to hibernation, and slow their metabolic rate to 1⁄15 of its normal rate. While most hummingbirds do not migrate, the rufous hummingbird has one of the longest migrations among birds, traveling twice per year between Alaska and Mexico, a distance of about 3,900 miles (6,300 km).\n",
|
||||
"Hummingbirds split from their sister group, the swifts and treeswifts, around 42 million years ago. The oldest known fossil hummingbird is Eurotrochilus, from the Rupelian Stage of Early Oligocene Europe.\n",
|
||||
"\n",
|
||||
"Page: Rufous hummingbird\n",
|
||||
"Summary: The rufous hummingbird (Selasphorus rufus) is a small hummingbird, about 8 cm (3.1 in) long with a long, straight and slender bill. These birds are known for their extraordinary flight skills, flying 2,000 mi (3,200 km) during their migratory transits. It is one of nine species in the genus Selasphorus.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Page: Bee hummingbird\n",
|
||||
"Summary: The bee hummingbird, zunzuncito or Helena hummingbird (Mellisuga helenae) is a species of hummingbird, native to the island of Cuba in the Caribbean. It is the smallest known bird. The bee hummingbird feeds on nectar of flowers and bugs found in Cuba.\n",
|
||||
"\n",
|
||||
"Page: Hummingbird cake\n",
|
||||
"Summary: Hummingbird cake is a banana-pineapple spice cake originating in Jamaica and a popular dessert in the southern United States since the 1970s. Ingredients include flour, sugar, salt, vegetable oil, ripe banana, pineapple, cinnamon, pecans, vanilla extract, eggs, and leavening agent. It is often served with cream cheese frosting.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `wikipedia` with `Fastest bird`\n",
|
||||
"Page: Anna's hummingbird\n",
|
||||
"Summary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.\n",
|
||||
"It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.\n",
|
||||
"These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
||||
"Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: Fastest animals\n",
|
||||
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
|
||||
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Page: Fastest animals\n",
|
||||
"Summary: This is a list of the fastest animals in the world, by types of animal.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Page: List of birds by flight speed\n",
|
||||
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon, able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
|
||||
"\n",
|
||||
"Page: Ostrich\n",
|
||||
"Summary: Ostriches are large flightless birds. They are the heaviest and largest living birds, with adult common ostriches weighing anywhere between 63.5 and 145 kilograms and laying the largest eggs of any living land animal. With the ability to run at 70 km/h (43.5 mph), they are the fastest birds on land. They are farmed worldwide, with significant industries in the Philippines and in Namibia. Ostrich leather is a lucrative commodity, and the large feathers are used as plumes for the decoration of ceremonial headgear. Ostrich eggs have been used by humans for millennia.\n",
|
||||
"Ostriches are of the genus Struthio in the order Struthioniformes, part of the infra-class Palaeognathae, a diverse group of flightless birds also known as ratites that includes the emus, rheas, cassowaries, kiwis and the extinct elephant birds and moas. There are two living species of ostrich: the common ostrich, native to large areas of sub-Saharan Africa, and the Somali ostrich, native to the Horn of Africa. The common ostrich was historically native to the Arabian Peninsula, and ostriches were present across Asia as far east as China and Mongolia during the Late Pleistocene and possibly into the Holocene.\u001b[0m\u001b[32;1m\u001b[1;3m### Hummingbird's Scientific Name\n",
|
||||
"The scientific name for the bee hummingbird, which is the smallest known bird and a species of hummingbird, is **Mellisuga helenae**. It is native to Cuba.\n",
|
||||
"\n",
|
||||
"### Fastest Bird Species\n",
|
||||
"The fastest bird in terms of airspeed is the **peregrine falcon**, which can exceed speeds of 320 km/h (200 mph) during its diving flight. In level flight, the fastest confirmed speed is held by the **common swift**, which can fly at 111.5 km/h (69.3 mph).\u001b[0m\n",
|
||||
"Page: Falcon\n",
|
||||
"Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
|
||||
"Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
|
||||
"The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
|
||||
"The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
|
||||
"Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
|
||||
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Total Tokens: 1583\n",
|
||||
"Prompt Tokens: 1412\n",
|
||||
"Completion Tokens: 171\n",
|
||||
"Total Cost (USD): $0.019250000000000003\n"
|
||||
"Total Tokens: 1787\n",
|
||||
"Prompt Tokens: 1687\n",
|
||||
"Completion Tokens: 100\n",
|
||||
"Total Cost (USD): $0.0009935\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -298,19 +561,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "4a3eced5-2ff7-49a7-a48b-768af8658323",
|
||||
"execution_count": 12,
|
||||
"id": "1837c807-136a-49d8-9c33-060e58dc16d2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokens Used: 0\n",
|
||||
"\tPrompt Tokens: 0\n",
|
||||
"\tCompletion Tokens: 0\n",
|
||||
"Tokens Used: 96\n",
|
||||
"\tPrompt Tokens: 26\n",
|
||||
"\tCompletion Tokens: 70\n",
|
||||
"Successful Requests: 2\n",
|
||||
"Total Cost (USD): $0.0\n"
|
||||
"Total Cost (USD): $0.001888\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -364,7 +627,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -71,13 +71,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")"
|
||||
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -95,19 +95,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I said \"J\\'adore la programmation,\" which means \"I love programming\" in French.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I said \"J'adore la programmation,\" which means \"I love programming\" in French.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessage, HumanMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
@@ -115,23 +111,25 @@
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"messages\"),\n",
|
||||
" (\"placeholder\", \"{messages}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"chain.invoke(\n",
|
||||
"ai_msg = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Translate this sentence from English to French: I love programming.\"\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"Translate this sentence from English to French: I love programming.\",\n",
|
||||
" ),\n",
|
||||
" AIMessage(content=\"J'adore la programmation.\"),\n",
|
||||
" HumanMessage(content=\"What did you just say?\"),\n",
|
||||
" (\"ai\", \"J'adore la programmation.\"),\n",
|
||||
" (\"human\", \"What did you just say?\"),\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
")"
|
||||
")\n",
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -165,7 +163,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.memory import ChatMessageHistory\n",
|
||||
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history = ChatMessageHistory()\n",
|
||||
"\n",
|
||||
@@ -193,7 +191,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.')"
|
||||
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@@ -250,7 +248,7 @@
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
@@ -304,10 +302,17 @@
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='The translation of \"I love programming\" in French is \"J\\'adore la programmation.\"')"
|
||||
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@@ -327,10 +332,17 @@
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.')"
|
||||
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
@@ -354,12 +366,12 @@
|
||||
"\n",
|
||||
"### Trimming messages\n",
|
||||
"\n",
|
||||
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is to only load and store the most recent `n` messages. Let's use an example history with some preloaded messages:"
|
||||
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -371,7 +383,7 @@
|
||||
" AIMessage(content='Fine thanks!')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -396,34 +408,28 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Nemo.')"
|
||||
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"chain_with_message_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history,\n",
|
||||
@@ -443,34 +449,33 @@
|
||||
"source": [
|
||||
"We can see the chain remembers the preloaded name.\n",
|
||||
"\n",
|
||||
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the `clear` method to remove messages and re-add them to the history. We don't have to, but let's put this method at the front of our chain to ensure it's always called:"
|
||||
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import trim_messages\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def trim_messages(chain_input):\n",
|
||||
" stored_messages = demo_ephemeral_chat_history.messages\n",
|
||||
" if len(stored_messages) <= 2:\n",
|
||||
" return False\n",
|
||||
"\n",
|
||||
" demo_ephemeral_chat_history.clear()\n",
|
||||
"\n",
|
||||
" for message in stored_messages[-2:]:\n",
|
||||
" demo_ephemeral_chat_history.add_message(message)\n",
|
||||
"\n",
|
||||
" return True\n",
|
||||
"\n",
|
||||
"trimmer = trim_messages(strategy=\"last\", max_tokens=2, token_counter=len)\n",
|
||||
"\n",
|
||||
"chain_with_trimming = (\n",
|
||||
" RunnablePassthrough.assign(messages_trimmed=trim_messages)\n",
|
||||
" | chain_with_message_history\n",
|
||||
" RunnablePassthrough.assign(chat_history=itemgetter(\"chat_history\") | trimmer)\n",
|
||||
" | prompt\n",
|
||||
" | chat\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain_with_trimmed_history = RunnableWithMessageHistory(\n",
|
||||
" chain_with_trimming,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")"
|
||||
]
|
||||
},
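To see the trimmer's effect in isolation, it can be invoked directly on the stored history; a minimal sketch, assuming the `trimmer` and `demo_ephemeral_chat_history` defined above:

```python
# Sketch: the trimmer keeps only the last two messages, because each
# message counts as one "token" when token_counter=len.
trimmed = trimmer.invoke(demo_ephemeral_chat_history.messages)
for message in trimmed:
    print(type(message).__name__, "-", message.content)
```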
|
||||
@@ -483,22 +488,29 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")"
|
||||
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_trimming.invoke(\n",
|
||||
"chain_with_trimmed_history.invoke(\n",
|
||||
" {\"input\": \"Where does P. Sherman live?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
")"
|
||||
@@ -506,19 +518,23 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content=\"What's my name?\"),\n",
|
||||
" AIMessage(content='Your name is Nemo.'),\n",
|
||||
"[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
|
||||
" AIMessage(content='Hello!'),\n",
|
||||
" HumanMessage(content='How are you today?'),\n",
|
||||
" AIMessage(content='Fine thanks!'),\n",
|
||||
" HumanMessage(content=\"What's my name?\"),\n",
|
||||
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
|
||||
" HumanMessage(content='Where does P. Sherman live?'),\n",
|
||||
" AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\")]"
|
||||
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -536,48 +552,39 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")"
|
||||
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_trimming.invoke(\n",
|
||||
"chain_with_trimmed_history.invoke(\n",
|
||||
" {\"input\": \"What is my name?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='Where does P. Sherman live?'),\n",
|
||||
" AIMessage(content=\"P. Sherman's address is 42 Wallaby Way, Sydney.\"),\n",
|
||||
" HumanMessage(content='What is my name?'),\n",
|
||||
" AIMessage(content=\"I'm sorry, I don't have access to your personal information.\")]"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"demo_ephemeral_chat_history.messages"
|
||||
"Check out our [how to guide on trimming messages](/docs/how_to/trim_messages/) for more."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -638,7 +645,7 @@
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"user\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
@@ -672,7 +679,7 @@
|
||||
" return False\n",
|
||||
" summarization_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\n",
|
||||
" \"user\",\n",
|
||||
" \"Distill the above chat messages into a single summary message. Include as many specific details as you can.\",\n",
|
||||
@@ -772,9 +779,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -89,7 +89,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_core.runnables import ConfigurableField\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
@@ -312,8 +312,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_core.runnables import ConfigurableField\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
|
||||
141
docs/docs/how_to/custom_callbacks.ipynb
Normal file
@@ -0,0 +1,141 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create custom callback handlers\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Callbacks](/docs/concepts/#callbacks)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n",
|
||||
"\n",
|
||||
"To create a custom callback handler, we need to determine the [event(s)](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
|
||||
"\n",
|
||||
"In the example below, we'll implement streaming with a custom handler.\n",
|
||||
"\n",
|
||||
"In our custom callback handler `MyCustomHandler`, we implement the `on_llm_new_token` handler to print the token we have just received. We then attach our custom handler to the model object as a constructor callback."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_anthropic\n",
|
||||
"\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"My custom handler, token: Here\n",
|
||||
"My custom handler, token: 's\n",
|
||||
"My custom handler, token: a\n",
|
||||
"My custom handler, token: bear\n",
|
||||
"My custom handler, token: joke\n",
|
||||
"My custom handler, token: for\n",
|
||||
"My custom handler, token: you\n",
|
||||
"My custom handler, token: :\n",
|
||||
"My custom handler, token: \n",
|
||||
"\n",
|
||||
"Why\n",
|
||||
"My custom handler, token: di\n",
|
||||
"My custom handler, token: d the\n",
|
||||
"My custom handler, token: bear\n",
|
||||
"My custom handler, token: dissol\n",
|
||||
"My custom handler, token: ve\n",
|
||||
"My custom handler, token: in\n",
|
||||
"My custom handler, token: water\n",
|
||||
"My custom handler, token: ?\n",
|
||||
"My custom handler, token: \n",
|
||||
"Because\n",
|
||||
"My custom handler, token: it\n",
|
||||
"My custom handler, token: was\n",
|
||||
"My custom handler, token: a\n",
|
||||
"My custom handler, token: polar\n",
|
||||
"My custom handler, token: bear\n",
|
||||
"My custom handler, token: !\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.callbacks import BaseCallbackHandler\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyCustomHandler(BaseCallbackHandler):\n",
|
||||
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
|
||||
" print(f\"My custom handler, token: {token}\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([\"Tell me a joke about {animal}\"])\n",
|
||||
"\n",
|
||||
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
|
||||
"# Additionally, we pass in our custom handler as a list to the callbacks parameter\n",
|
||||
"model = ChatAnthropic(\n",
|
||||
" model=\"claude-3-sonnet-20240229\", streaming=True, callbacks=[MyCustomHandler()]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | model\n",
|
||||
"\n",
|
||||
"response = chain.invoke({\"animal\": \"bears\"})"
|
||||
]
|
||||
},
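Besides the constructor, the handler could equally be attached at runtime through the `config` argument; a minimal sketch reusing `MyCustomHandler`, `prompt`, and the model class from above:

```python
# Sketch: passing the custom handler per-invocation instead of in the constructor.
model_without_callbacks = ChatAnthropic(model="claude-3-sonnet-20240229", streaming=True)
chain = prompt | model_without_callbacks

response = chain.invoke(
    {"animal": "bears"},
    config={"callbacks": [MyCustomHandler()]},
)
```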
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can see [this reference page](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
|
||||
"\n",
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"You've now learned how to create your own custom callback handlers.\n",
|
||||
"\n",
|
||||
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -5,35 +5,29 @@
|
||||
"id": "5436020b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create custom Tools\n",
|
||||
"# How to create custom tools\n",
|
||||
"\n",
|
||||
"When constructing your own agent, you will need to provide it with a list of Tools that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
|
||||
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
|
||||
"\n",
|
||||
"- `name` (str), is required and must be unique within a set of tools provided to an agent\n",
|
||||
"- `description` (str), is optional but recommended, as it is used by an agent to determine tool use\n",
|
||||
"- `args_schema` (Pydantic BaseModel), is optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters.\n",
|
||||
"| Attribute | Type | Description |\n",
|
||||
"|-----------------|---------------------------|------------------------------------------------------------------------------------------------------------------|\n",
|
||||
"| name | str | Must be unique within a set of tools provided to an LLM or agent. |\n",
|
||||
"| description | str | Describes what the tool does. Used as context by the LLM or agent. |\n",
|
||||
"| args_schema | Pydantic BaseModel | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
|
||||
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
|
||||
"\n",
|
||||
"LangChain provides 3 ways to create tools:\n",
|
||||
"\n",
|
||||
"There are multiple ways to define a tool. In this guide, we will walk through how to do for two functions:\n",
|
||||
"1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n",
|
||||
"2. Using [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n",
|
||||
"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
|
||||
"\n",
|
||||
"1. A made up search function that always returns the string \"LangChain\"\n",
|
||||
"2. A multiplier function that will multiply two numbers by eachother\n",
|
||||
"The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n",
|
||||
"\n",
|
||||
"The biggest difference here is that the first function only requires one input, while the second one requires multiple. Many agents only work with functions that require single inputs, so it's important to know how to work with those. For the most part, defining these custom tools is the same, but there are some differences."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "1aaba18c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import things that are needed generically\n",
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain.tools import BaseTool, StructuredTool, tool"
|
||||
":::{.callout-tip}\n",
|
||||
"\n",
|
||||
"Models will perform better if the tools have well chosen names, descriptions and JSON schemas.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -48,56 +42,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "b0ce7de8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@tool\n",
|
||||
"def search(query: str) -> str:\n",
|
||||
" \"\"\"Look up things online.\"\"\"\n",
|
||||
" return \"LangChain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e889fa34",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"search\n",
|
||||
"search(query: str) -> str - Look up things online.\n",
|
||||
"{'query': {'title': 'Query', 'type': 'string'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(search.name)\n",
|
||||
"print(search.description)\n",
|
||||
"print(search.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "0b9694d9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "d7f9395b",
|
||||
"execution_count": 1,
|
||||
"id": "cc7005cd-072f-4d37-8453-6297468e5192",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -111,11 +57,45 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Let's inspect some of the attributes associated with the tool.\n",
|
||||
"print(multiply.name)\n",
|
||||
"print(multiply.description)\n",
|
||||
"print(multiply.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96698b67-993a-4c97-b867-333132e1eb14",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Or create an **async** implementation, like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "0c0991db-b997-4611-be37-4346e660506b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"async def amultiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98d6eee9",
|
||||
@@ -126,72 +106,23 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "dbbf4b6c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SearchInput(BaseModel):\n",
|
||||
" query: str = Field(description=\"should be a search query\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool(\"search-tool\", args_schema=SearchInput, return_direct=True)\n",
|
||||
"def search(query: str) -> str:\n",
|
||||
" \"\"\"Look up things online.\"\"\"\n",
|
||||
" return \"LangChain\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"id": "5950ce32",
|
||||
"execution_count": 3,
|
||||
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"search-tool\n",
|
||||
"search-tool(query: str) -> str - Look up things online.\n",
|
||||
"{'query': {'title': 'Query', 'description': 'should be a search query', 'type': 'string'}}\n",
|
||||
"multiplication-tool\n",
|
||||
"multiplication-tool(a: int, b: int) -> int - Multiply two numbers.\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
|
||||
"True\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(search.name)\n",
|
||||
"print(search.description)\n",
|
||||
"print(search.args)\n",
|
||||
"print(search.return_direct)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9d11e80c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Subclass BaseTool\n",
|
||||
"\n",
|
||||
"You can also explicitly define a custom tool by subclassing the BaseTool class. This provides maximal control over the tool definition, but is a bit more work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"id": "1dad8f8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"\n",
|
||||
"from langchain.callbacks.manager import (\n",
|
||||
" AsyncCallbackManagerForToolRun,\n",
|
||||
" CallbackManagerForToolRun,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class SearchInput(BaseModel):\n",
|
||||
" query: str = Field(description=\"should be a search query\")\n",
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CalculatorInput(BaseModel):\n",
|
||||
@@ -199,22 +130,145 @@
|
||||
" b: int = Field(description=\"second number\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CustomSearchTool(BaseTool):\n",
|
||||
" name = \"custom_search\"\n",
|
||||
" description = \"useful for when you need to answer questions about current events\"\n",
|
||||
" args_schema: Type[BaseModel] = SearchInput\n",
|
||||
"@tool(\"multiplication-tool\", args_schema=CalculatorInput, return_direct=True)\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
" def _run(\n",
|
||||
" self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None\n",
|
||||
" ) -> str:\n",
|
||||
" \"\"\"Use the tool.\"\"\"\n",
|
||||
" return \"LangChain\"\n",
|
||||
"\n",
|
||||
" async def _arun(\n",
|
||||
" self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None\n",
|
||||
" ) -> str:\n",
|
||||
" \"\"\"Use the tool asynchronously.\"\"\"\n",
|
||||
" raise NotImplementedError(\"custom_search does not support async\")\n",
|
||||
"# Let's inspect some of the attributes associated with the tool.\n",
|
||||
"print(multiply.name)\n",
|
||||
"print(multiply.description)\n",
|
||||
"print(multiply.args)\n",
|
||||
"print(multiply.return_direct)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b63fcc3b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## StructuredTool\n",
|
||||
"\n",
|
||||
"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"6\n",
|
||||
"10\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.tools import StructuredTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def amultiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)\n",
|
||||
"\n",
|
||||
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
|
||||
"print(await calculator.ainvoke({\"a\": 2, \"b\": 5}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "26b3712a-b38d-4582-b6e6-bc7cfb1d6680",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To configure it:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"6\n",
|
||||
"Calculator\n",
|
||||
"Calculator(a: int, b: int) -> int - multiply numbers\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class CalculatorInput(BaseModel):\n",
|
||||
" a: int = Field(description=\"first number\")\n",
|
||||
" b: int = Field(description=\"second number\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"calculator = StructuredTool.from_function(\n",
|
||||
" func=multiply,\n",
|
||||
" name=\"Calculator\",\n",
|
||||
" description=\"multiply numbers\",\n",
|
||||
" args_schema=CalculatorInput,\n",
|
||||
" return_direct=True,\n",
|
||||
" # coroutine= ... <- you can specify an async method if desired as well\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
|
||||
"print(calculator.name)\n",
|
||||
"print(calculator.description)\n",
|
||||
"print(calculator.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b840074b-9c10-4ca0-aed8-626c52b2398f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Subclass BaseTool\n",
|
||||
"\n",
|
||||
"You can define a custom tool by sub-classing from `BaseTool`. This provides maximal control over the tool definition, but requires writing more code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "1dad8f8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"\n",
|
||||
"from langchain.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_core.callbacks import (\n",
|
||||
" AsyncCallbackManagerForToolRun,\n",
|
||||
" CallbackManagerForToolRun,\n",
|
||||
")\n",
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CalculatorInput(BaseModel):\n",
|
||||
" a: int = Field(description=\"first number\")\n",
|
||||
" b: int = Field(description=\"second number\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class CustomCalculatorTool(BaseTool):\n",
|
||||
@@ -236,35 +290,17 @@
|
||||
" run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n",
|
||||
" ) -> str:\n",
|
||||
" \"\"\"Use the tool asynchronously.\"\"\"\n",
|
||||
" raise NotImplementedError(\"Calculator does not support async\")"
|
||||
" # If the calculation is cheap, you can just delegate to the sync implementation\n",
|
||||
" # as shown below.\n",
|
||||
" # If the sync calculation is expensive, you should delete the entire _arun method.\n",
|
||||
" # LangChain will automatically provide a better implementation that will\n",
|
||||
" # kick off the task in a thread to make sure it doesn't block other async code.\n",
|
||||
" return self._run(a, b, run_manager=run_manager.get_sync())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "89933e27",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"custom_search\n",
|
||||
"useful for when you need to answer questions about current events\n",
|
||||
"{'query': {'title': 'Query', 'description': 'should be a search query', 'type': 'string'}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search = CustomSearchTool()\n",
|
||||
"print(search.name)\n",
|
||||
"print(search.description)\n",
|
||||
"print(search.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"execution_count": 7,
|
||||
"id": "bb551c33",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -275,7 +311,9 @@
|
||||
"Calculator\n",
|
||||
"useful for when you need to answer questions about math\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
|
||||
"True\n"
|
||||
"True\n",
|
||||
"6\n",
|
||||
"6\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -284,80 +322,50 @@
|
||||
"print(multiply.name)\n",
|
||||
"print(multiply.description)\n",
|
||||
"print(multiply.args)\n",
|
||||
"print(multiply.return_direct)"
|
||||
"print(multiply.return_direct)\n",
|
||||
"\n",
|
||||
"print(multiply.invoke({\"a\": 2, \"b\": 3}))\n",
|
||||
"print(await multiply.ainvoke({\"a\": 2, \"b\": 3}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b63fcc3b",
|
||||
"id": "97aba6cc-4bdf-4fab-aff3-d89e7d9c3a09",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## StructuredTool dataclass\n",
|
||||
"## How to create async tools\n",
|
||||
"\n",
|
||||
"You can also use a `StructuredTool` dataclass. This methods is a mix between the previous two. It's more convenient than inheriting from the BaseTool class, but provides more functionality than just using a decorator."
|
||||
"LangChain Tools implement the [Runnable interface 🏃](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html).\n",
|
||||
"\n",
|
||||
"All Runnables expose the `invoke` and `ainvoke` methods (as well as other methods like `batch`, `abatch`, `astream` etc).\n",
|
||||
"\n",
|
||||
"So even if you only provide an `sync` implementation of a tool, you could still use the `ainvoke` interface, but there\n",
|
||||
"are some important things to know:\n",
|
||||
"\n",
|
||||
"* LangChain's by default provides an async implementation that assumes that the function is expensive to compute, so it'll delegate execution to another thread.\n",
|
||||
"* If you're working in an async codebase, you should create async tools rather than sync tools, to avoid incuring a small overhead due to that thread.\n",
|
||||
"* If you need both sync and async implementations, use `StructuredTool.from_function` or sub-class from `BaseTool`.\n",
|
||||
"* If implementing both sync and async, and the sync code is fast to run, override the default LangChain async implementation and simply call the sync code.\n",
|
||||
"* You CANNOT and SHOULD NOT use the sync `invoke` with an `async` tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "56ff7670",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def search_function(query: str):\n",
|
||||
" return \"LangChain\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"search = StructuredTool.from_function(\n",
|
||||
" func=search_function,\n",
|
||||
" name=\"Search\",\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
" # coroutine= ... <- you can specify an async method if desired as well\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "d3fd3896",
|
||||
"execution_count": 8,
|
||||
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Search\n",
|
||||
"Search(query: str) - useful for when you need to answer questions about current events\n",
|
||||
"{'query': {'title': 'Query', 'type': 'string'}}\n"
|
||||
"6\n",
|
||||
"10\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(search.name)\n",
|
||||
"print(search.description)\n",
|
||||
"print(search.args)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9b560f7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also define a custom `args_schema` to provide more information about inputs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "712c1967",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CalculatorInput(BaseModel):\n",
|
||||
" a: int = Field(description=\"first number\")\n",
|
||||
" b: int = Field(description=\"second number\")\n",
|
||||
"from langchain_core.tools import StructuredTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
@@ -365,185 +373,223 @@
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"calculator = StructuredTool.from_function(\n",
|
||||
" func=multiply,\n",
|
||||
" name=\"Calculator\",\n",
|
||||
" description=\"multiply numbers\",\n",
|
||||
" args_schema=CalculatorInput,\n",
|
||||
" return_direct=True,\n",
|
||||
" # coroutine= ... <- you can specify an async method if desired as well\n",
|
||||
")"
|
||||
"calculator = StructuredTool.from_function(func=multiply)\n",
|
||||
"\n",
|
||||
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
|
||||
"print(\n",
|
||||
" await calculator.ainvoke({\"a\": 2, \"b\": 5})\n",
|
||||
") # Uses default LangChain async implementation incurs small overhead"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"id": "f634081e",
|
||||
"execution_count": 9,
|
||||
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Calculator\n",
|
||||
"Calculator(a: int, b: int) -> int - multiply numbers\n",
|
||||
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
|
||||
"6\n",
|
||||
"10\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(calculator.name)\n",
|
||||
"print(calculator.description)\n",
|
||||
"print(calculator.args)"
|
||||
"from langchain_core.tools import StructuredTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def amultiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)\n",
|
||||
"\n",
|
||||
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
|
||||
"print(\n",
|
||||
" await calculator.ainvoke({\"a\": 2, \"b\": 5})\n",
|
||||
") # Uses use provided amultiply without additional overhead"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f1da459d",
|
||||
"id": "c80ffdaa-e4ba-4a70-8500-32bf4f60cc1a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You should not and cannot use `.invoke` when providing only an async definition."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Raised not implemented error. You should not be doing this.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"@tool\n",
|
||||
"async def multiply(a: int, b: int) -> int:\n",
|
||||
" \"\"\"Multiply two numbers.\"\"\"\n",
|
||||
" return a * b\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" multiply.invoke({\"a\": 2, \"b\": 3})\n",
|
||||
"except NotImplementedError:\n",
|
||||
" print(\"Raised not implemented error. You should not be doing this.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f9c746a7-88d7-4afb-bcb8-0e98b891e8b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Handling Tool Errors \n",
|
||||
"When a tool encounters an error and the exception is not caught, the agent will stop executing. If you want the agent to continue execution, you can raise a `ToolException` and set `handle_tool_error` accordingly. \n",
|
||||
"\n",
|
||||
"When `ToolException` is thrown, the agent will not stop working, but will handle the exception according to the `handle_tool_error` variable of the tool, and the processing result will be returned to the agent as observation, and printed in red.\n",
|
||||
"If you're using tools with agents, you will likely need an error handling strategy, so the agent can recover from the error and continue execution.\n",
|
||||
"\n",
|
||||
"You can set `handle_tool_error` to `True`, set it a unified string value, or set it as a function. If it's set as a function, the function should take a `ToolException` as a parameter and return a `str` value.\n",
|
||||
"A simple strategy is to throw a `ToolException` from inside the tool and specify an error handler using `handle_tool_error`. \n",
|
||||
"\n",
|
||||
"When the error handler is specified, the exception will be caught and the error handler will decide which output to return from the tool.\n",
|
||||
"\n",
|
||||
"You can set `handle_tool_error` to `True`, a string value, or a function. If it's a function, the function should take a `ToolException` as a parameter and return a value.\n",
|
||||
"\n",
|
||||
"Please note that only raising a `ToolException` won't be effective. You need to first set the `handle_tool_error` of the tool because its default value is `False`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f8bf4668",
|
||||
"execution_count": 11,
|
||||
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.tools import ToolException\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def search_tool1(s: str):\n",
|
||||
" raise ToolException(\"The search tool1 is not available.\")"
|
||||
"def get_weather(city: str) -> int:\n",
|
||||
" \"\"\"Get weather for the given city.\"\"\"\n",
|
||||
" raise ToolException(f\"Error: There is no city by the name of {city}.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7fb56757",
|
||||
"id": "9d93b217-1d44-4d31-8956-db9ea680ff4f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First, let's see what happens if we don't set `handle_tool_error` - it will error."
|
||||
"Here's an example with the default `handle_tool_error=True` behavior."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "f3dfbcb0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "ToolException",
|
||||
"evalue": "The search tool1 is not available.",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mToolException\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[58], line 7\u001b[0m\n\u001b[1;32m 1\u001b[0m search \u001b[38;5;241m=\u001b[39m StructuredTool\u001b[38;5;241m.\u001b[39mfrom_function(\n\u001b[1;32m 2\u001b[0m func\u001b[38;5;241m=\u001b[39msearch_tool1,\n\u001b[1;32m 3\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSearch_tool1\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 4\u001b[0m description\u001b[38;5;241m=\u001b[39mdescription,\n\u001b[1;32m 5\u001b[0m )\n\u001b[0;32m----> 7\u001b[0m \u001b[43msearch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtest\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/libs/core/langchain_core/tools.py:344\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, **kwargs)\u001b[0m\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_tool_error:\n\u001b[1;32m 343\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_tool_error(e)\n\u001b[0;32m--> 344\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_tool_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 346\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m e\u001b[38;5;241m.\u001b[39margs:\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/libs/core/langchain_core/tools.py:337\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, **kwargs)\u001b[0m\n\u001b[1;32m 334\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 335\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 336\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 337\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_run\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtool_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtool_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 338\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 340\u001b[0m )\n\u001b[1;32m 341\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ToolException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_tool_error:\n",
|
||||
"File \u001b[0;32m~/workplace/langchain/libs/core/langchain_core/tools.py:631\u001b[0m, in \u001b[0;36mStructuredTool._run\u001b[0;34m(self, run_manager, *args, **kwargs)\u001b[0m\n\u001b[1;32m 622\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc:\n\u001b[1;32m 623\u001b[0m new_argument_supported \u001b[38;5;241m=\u001b[39m signature(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcallbacks\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 624\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m (\n\u001b[1;32m 625\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc(\n\u001b[1;32m 626\u001b[0m \u001b[38;5;241m*\u001b[39margs,\n\u001b[1;32m 627\u001b[0m callbacks\u001b[38;5;241m=\u001b[39mrun_manager\u001b[38;5;241m.\u001b[39mget_child() \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 628\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 629\u001b[0m )\n\u001b[1;32m 630\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_argument_supported\n\u001b[0;32m--> 631\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 632\u001b[0m )\n\u001b[1;32m 633\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool does not support sync\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
|
||||
"Cell \u001b[0;32mIn[55], line 5\u001b[0m, in \u001b[0;36msearch_tool1\u001b[0;34m(s)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21msearch_tool1\u001b[39m(s: \u001b[38;5;28mstr\u001b[39m):\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ToolException(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe search tool1 is not available.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
|
||||
"\u001b[0;31mToolException\u001b[0m: The search tool1 is not available."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search = StructuredTool.from_function(\n",
|
||||
" func=search_tool1,\n",
|
||||
" name=\"Search_tool1\",\n",
|
||||
" description=\"A bad tool\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"search.run(\"test\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d2475acd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, let's set `handle_tool_error` to be True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "ab81e0f0",
|
||||
"execution_count": 12,
|
||||
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The search tool1 is not available.'"
|
||||
"'Error: There is no city by the name of foobar.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 59,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"search = StructuredTool.from_function(\n",
|
||||
" func=search_tool1,\n",
|
||||
" name=\"Search_tool1\",\n",
|
||||
" description=\"A bad tool\",\n",
|
||||
"get_weather_tool = StructuredTool.from_function(\n",
|
||||
" func=get_weather,\n",
|
||||
" handle_tool_error=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"search.run(\"test\")"
|
||||
"get_weather_tool.invoke({\"city\": \"foobar\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dafbbcbe",
|
||||
"id": "f91d6dc0-3271-4adc-a155-21f2e62ffa56",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also define a custom way to handle the tool error"
|
||||
"We can set `handle_tool_error` to a string that will always be returned."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"id": "ad16fbcf",
|
||||
"execution_count": 13,
|
||||
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The following errors occurred during tool execution:The search tool1 is not available.Please try another tool.'"
|
||||
"\"There is no such city, but it's probably above 0K there!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 60,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"get_weather_tool = StructuredTool.from_function(\n",
|
||||
" func=get_weather,\n",
|
||||
" handle_tool_error=\"There is no such city, but it's probably above 0K there!\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"get_weather_tool.invoke({\"city\": \"foobar\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b0a640c1-e08f-4413-83b6-f599f304935f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Handling the error using a function:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def _handle_error(error: ToolException) -> str:\n",
|
||||
" return (\n",
|
||||
" \"The following errors occurred during tool execution:\"\n",
|
||||
" + error.args[0]\n",
|
||||
" + \"Please try another tool.\"\n",
|
||||
" )\n",
|
||||
" return f\"The following errors occurred during tool execution: `{error.args[0]}`\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"search = StructuredTool.from_function(\n",
|
||||
" func=search_tool1,\n",
|
||||
" name=\"Search_tool1\",\n",
|
||||
" description=\"A bad tool\",\n",
|
||||
"get_weather_tool = StructuredTool.from_function(\n",
|
||||
" func=get_weather,\n",
|
||||
" handle_tool_error=_handle_error,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"search.run(\"test\")"
|
||||
"get_weather_tool.invoke({\"city\": \"foobar\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -563,7 +609,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
"\n",
|
||||
"- Verbose Mode: This adds print statements for \"important\" events in your chain.\n",
|
||||
"- Debug Mode: This add logging statements for ALL events in your chain.\n",
|
||||
"- LangSmith Tracing: This logs events to [LangSmith](/docs/langsmith/) to allow for visualization there.\n",
|
||||
"- LangSmith Tracing: This logs events to [LangSmith](https://docs.smith.langchain.com/) to allow for visualization there.\n",
|
||||
"\n",
|
||||
"| | Verbose Mode | Debug Mode | LangSmith Tracing |\n",
|
||||
"|------------------------|--------------|------------|-------------------|\n",
|
||||
|
||||
@@ -69,6 +69,17 @@
|
||||
"Once we have loaded PDFs into LangChain `Document` objects, we can index them (e.g., a RAG application) in the usual way:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c3b932bb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install faiss-cpu \n",
|
||||
"# use `pip install faiss-gpu` for CUDA GPU support"
|
||||
]
|
||||
},
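As a rough sketch of that indexing step (assuming the loaded pages are in a `docs` list and an OpenAI API key is configured; the retriever settings below are illustrative, not prescriptive), the usual pattern looks something like this:

```python
# Minimal sketch: embed the loaded Document objects, store them in a FAISS
# index, and expose the index as a retriever for a RAG pipeline.
# Assumes `docs` holds the Document objects produced by the PDF loader.
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

vectorstore = FAISS.from_documents(docs, OpenAIEmbeddings())
retriever = vectorstore.as_retriever(search_kwargs={"k": 4})

relevant_docs = retriever.invoke("What is this document about?")
```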
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -463,7 +474,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"cur_idx = -1\n",
|
||||
"semantic_snippets = []\n",
|
||||
|
||||
190
docs/docs/how_to/dynamic_chain.ipynb
Normal file
@@ -0,0 +1,190 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "50d57bf2-7104-4570-b3e5-90fd71e1bea1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create a dynamic (self-constructing) chain\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following:\n",
|
||||
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
|
||||
"- [How to turn any function into a runnable](/docs/how_to/functions)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Sometimes we want to construct parts of a chain at runtime, depending on the chain inputs ([routing](/docs/how_to/routing/) is the most common example of this). We can create dynamic chains like this using a very useful property of RunnableLambda's, which is that if a RunnableLambda returns a Runnable, that Runnable is itself invoked. Let's see an example.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
"/>\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "406bffc2-86d0-4cb9-9262-5c1e3442397a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "0ae6692b-983e-40b8-aa2a-6c078d945b9e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"According to the context provided, Egypt's population in 2024 is estimated to be about 111 million.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
|
||||
"\n",
|
||||
"contextualize_instructions = \"\"\"Convert the latest user question into a standalone question given the chat history. Don't answer the question, return the question and nothing else (no descriptive text).\"\"\"\n",
|
||||
"contextualize_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", contextualize_instructions),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"contextualize_question = contextualize_prompt | llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"qa_instructions = (\n",
|
||||
" \"\"\"Answer the user question given the following context:\\n\\n{context}.\"\"\"\n",
|
||||
")\n",
|
||||
"qa_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", qa_instructions), (\"human\", \"{question}\")]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def contextualize_if_needed(input_: dict) -> Runnable:\n",
|
||||
" if input_.get(\"chat_history\"):\n",
|
||||
" # NOTE: This is returning another Runnable, not an actual output.\n",
|
||||
" return contextualize_question\n",
|
||||
" else:\n",
|
||||
" return RunnablePassthrough()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
"def fake_retriever(input_: dict) -> str:\n",
|
||||
" return \"egypt's population in 2024 is about 111 million\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"full_chain = (\n",
|
||||
" RunnablePassthrough.assign(question=contextualize_if_needed).assign(\n",
|
||||
" context=fake_retriever\n",
|
||||
" )\n",
|
||||
" | qa_prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"full_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"what about egypt\",\n",
|
||||
" \"chat_history\": [\n",
|
||||
" (\"human\", \"what's the population of indonesia\"),\n",
|
||||
" (\"ai\", \"about 276 million\"),\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5076ddb4-4a99-47ad-b549-8ac27ca3e2c6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The key here is that `contextualize_if_needed` returns another Runnable and not an actual output. This returned Runnable is itself run when the full chain is executed.\n",
|
||||
"\n",
|
||||
"Looking at the trace we can see that, since we passed in chat_history, we executed the contextualize_question chain as part of the full chain: https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4fe6ca44-a643-4859-a290-be68403f51f0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the streaming, batching, etc. capabilities of the returned Runnable are all preserved"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "6def37fa-5105-4090-9b07-77cb488ecd9c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"What\n",
|
||||
" is\n",
|
||||
" the\n",
|
||||
" population\n",
|
||||
" of\n",
|
||||
" Egypt\n",
|
||||
"?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in contextualize_if_needed.stream(\n",
|
||||
" {\n",
|
||||
" \"question\": \"what about egypt\",\n",
|
||||
" \"chat_history\": [\n",
|
||||
" (\"human\", \"what's the population of indonesia\"),\n",
|
||||
" (\"ai\", \"about 276 million\"),\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
"):\n",
|
||||
" print(chunk)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -75,6 +75,31 @@ Otherwise you can initialize without any params:
|
||||
from langchain_cohere import CohereEmbeddings
|
||||
|
||||
embeddings_model = CohereEmbeddings()
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="huggingface" label="Hugging Face">
|
||||
|
||||
To start we'll need to install the Hugging Face partner package:
|
||||
|
||||
```bash
|
||||
pip install langchain-huggingface
|
||||
```
|
||||
|
||||
You can then load any [Sentence Transformers model](https://huggingface.co/models?library=sentence-transformers) from the Hugging Face Hub.
|
||||
|
||||
```python
|
||||
from langchain_huggingface import HuggingFaceEmbeddings
|
||||
|
||||
embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
|
||||
```
|
||||
|
||||
You can also leave the `model_name` blank to use the default [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) model.
|
||||
|
||||
```python
|
||||
from langchain_huggingface import HuggingFaceEmbeddings
|
||||
|
||||
embeddings_model = HuggingFaceEmbeddings()
|
||||
```
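
In either case, the resulting object can be used like any other LangChain embeddings model. A quick sanity check (the query and document strings below are just examples) might look like:

```python
# Embed a single query and a small batch of texts; embed_query and
# embed_documents are part of the standard LangChain Embeddings interface.
query_vector = embeddings_model.embed_query("What is LangChain?")
doc_vectors = embeddings_model.embed_documents(["Document one", "Document two"])

print(len(query_vector))  # dimensionality of the embedding vector
```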
|
||||
|
||||
</TabItem>
|
||||
|
||||
@@ -4,13 +4,17 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to create an Ensemble Retriever\n",
|
||||
"# How to combine results from multiple retrievers\n",
|
||||
"\n",
|
||||
"The `EnsembleRetriever` takes a list of retrievers as input and ensemble the results of their `get_relevant_documents()` methods and rerank the results based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
|
||||
"The [EnsembleRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
|
||||
"\n",
|
||||
"By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm. \n",
|
||||
"\n",
|
||||
"The most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as \"hybrid search\". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity."
|
||||
"The most common pattern is to combine a sparse retriever (like BM25) with a dense retriever (like embedding similarity), because their strengths are complementary. It is also known as \"hybrid search\". The sparse retriever is good at finding relevant documents based on keywords, while the dense retriever is good at finding relevant documents based on semantic similarity.\n",
|
||||
"\n",
|
||||
"## Basic usage\n",
|
||||
"\n",
|
||||
"Below we demonstrate ensembling of a [BM25Retriever](https://api.python.langchain.com/en/latest/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://api.python.langchain.com/en/latest/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -24,22 +28,15 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import EnsembleRetriever\n",
|
||||
"from langchain_community.retrievers import BM25Retriever\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"doc_list_1 = [\n",
|
||||
" \"I like apples\",\n",
|
||||
" \"I like oranges\",\n",
|
||||
@@ -71,19 +68,19 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='You like apples', metadata={'source': 2}),\n",
|
||||
" Document(page_content='I like apples', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like oranges', metadata={'source': 2}),\n",
|
||||
" Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]"
|
||||
"[Document(page_content='I like apples', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like apples', metadata={'source': 2}),\n",
|
||||
" Document(page_content='Apples and oranges are fruits', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like oranges', metadata={'source': 2})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -99,24 +96,17 @@
|
||||
"source": [
|
||||
"## Runtime Configuration\n",
|
||||
"\n",
|
||||
"We can also configure the retrievers at runtime. In order to do this, we need to mark the fields as configurable"
|
||||
"We can also configure the individual retrievers at runtime using [configurable fields](/docs/how_to/configure). Below we update the \"top-k\" parameter for the FAISS retriever specifically:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import ConfigurableField"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import ConfigurableField\n",
|
||||
"\n",
|
||||
"faiss_retriever = faiss_vectorstore.as_retriever(\n",
|
||||
" search_kwargs={\"k\": 2}\n",
|
||||
").configurable_fields(\n",
|
||||
@@ -125,15 +115,8 @@
|
||||
" name=\"Search Kwargs\",\n",
|
||||
" description=\"The search kwargs to use\",\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
")\n",
|
||||
"\n",
|
||||
"ensemble_retriever = EnsembleRetriever(\n",
|
||||
" retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]\n",
|
||||
")"
|
||||
@@ -141,9 +124,22 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='I like apples', metadata={'source': 1}),\n",
|
||||
" Document(page_content='You like apples', metadata={'source': 2}),\n",
|
||||
" Document(page_content='Apples and oranges are fruits', metadata={'source': 1})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"config = {\"configurable\": {\"search_kwargs_faiss\": {\"k\": 1}}}\n",
|
||||
"docs = ensemble_retriever.invoke(\"apples\", config=config)\n",
|
||||
@@ -181,7 +177,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"examples = [\n",
|
||||
" {\"input\": \"hi\", \"output\": \"ciao\"},\n",
|
||||
" {\"input\": \"bye\", \"output\": \"arrivaderci\"},\n",
|
||||
" {\"input\": \"bye\", \"output\": \"arrivederci\"},\n",
|
||||
" {\"input\": \"soccer\", \"output\": \"calcio\"},\n",
|
||||
"]"
|
||||
]
|
||||
@@ -133,7 +133,7 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'input': 'bye', 'output': 'arrivaderci'}]"
|
||||
"[{'input': 'bye', 'output': 'arrivederci'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
@@ -209,7 +209,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Translate the following words from English to Italain:\n",
|
||||
"Translate the following words from English to Italian:\n",
|
||||
"\n",
|
||||
"Input: hand -> Output: mano\n",
|
||||
"\n",
|
||||
@@ -222,7 +222,7 @@
|
||||
" example_selector=example_selector,\n",
|
||||
" example_prompt=example_prompt,\n",
|
||||
" suffix=\"Input: {input} -> Output:\",\n",
|
||||
" prefix=\"Translate the following words from English to Italain:\",\n",
|
||||
" prefix=\"Translate the following words from English to Italian:\",\n",
|
||||
" input_variables=[\"input\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
|
||||
@@ -17,8 +17,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"from langchain.prompts.example_selector import LengthBasedExampleSelector\n",
|
||||
"from langchain_core.example_selectors import LengthBasedExampleSelector\n",
|
||||
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"\n",
|
||||
"# Examples of a pretend task of creating antonyms.\n",
|
||||
"examples = [\n",
|
||||
|
||||
@@ -17,12 +17,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"from langchain.prompts.example_selector import (\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.example_selectors import (\n",
|
||||
" MaxMarginalRelevanceExampleSelector,\n",
|
||||
" SemanticSimilarityExampleSelector,\n",
|
||||
")\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"example_prompt = PromptTemplate(\n",
|
||||
|
||||
@@ -19,8 +19,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"from langchain.prompts.example_selector.ngram_overlap import NGramOverlapExampleSelector\n",
|
||||
"from langchain_community.example_selectors import NGramOverlapExampleSelector\n",
|
||||
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"\n",
|
||||
"example_prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"input\", \"output\"],\n",
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
|
||||
"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"example_prompt = PromptTemplate(\n",
|
||||
|
||||
@@ -128,7 +128,7 @@
|
||||
" # Having a good description can help improve extraction results.\n",
|
||||
" name: Optional[str] = Field(..., description=\"The name of the person\")\n",
|
||||
" hair_color: Optional[str] = Field(\n",
|
||||
" ..., description=\"The color of the peron's eyes if known\"\n",
|
||||
" ..., description=\"The color of the person's hair if known\"\n",
|
||||
" )\n",
|
||||
" height_in_meters: Optional[str] = Field(..., description=\"Height in METERs\")\n",
|
||||
"\n",
|
||||
|
||||
@@ -69,7 +69,7 @@
|
||||
"source": [
|
||||
"from typing import List, Optional\n",
|
||||
"\n",
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"from langchain_core.output_parsers import PydanticOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
|
||||
"\n",
|
||||
|
||||
@@ -1,11 +1,21 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "018f3868-e60d-4db6-a1c6-c6633c66b1f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"keywords: [LCEL, fallbacks]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "19c9cbd6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Fallbacks\n",
|
||||
"# How to add fallbacks to a runnable\n",
|
||||
"\n",
|
||||
"When working with language models, you may often encounter issues from the underlying APIs, whether these be rate limiting or downtime. Therefore, as you go to move your LLM applications into production it becomes more and more important to safeguard against these. That's why we've introduced the concept of fallbacks. \n",
|
||||
"\n",
|
||||
@@ -43,7 +53,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
@@ -80,8 +90,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Note that we set max_retries = 0 to avoid retrying on RateLimits, etc\n",
|
||||
"openai_llm = ChatOpenAI(max_retries=0)\n",
|
||||
"anthropic_llm = ChatAnthropic()\n",
|
||||
"openai_llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", max_retries=0)\n",
|
||||
"anthropic_llm = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
|
||||
"llm = openai_llm.with_fallbacks([anthropic_llm])"
|
||||
]
|
||||
},
|
||||
@@ -447,7 +457,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.5"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -45,7 +45,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"example_prompt = PromptTemplate.from_template(\"Question: {question}\\n{answer}\")"
|
||||
]
|
||||
@@ -222,7 +222,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts.few_shot import FewShotPromptTemplate\n",
|
||||
"from langchain_core.prompts import FewShotPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = FewShotPromptTemplate(\n",
|
||||
" examples=examples,\n",
|
||||
@@ -282,8 +282,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"example_selector = SemanticSimilarityExampleSelector.from_examples(\n",
|
||||
|
||||
@@ -88,10 +88,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" FewShotChatMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
|
||||
@@ -218,8 +215,8 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import SemanticSimilarityExampleSelector\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
@@ -305,10 +302,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" FewShotChatMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
|
||||
"\n",
|
||||
"# Define the few-shot prompt.\n",
|
||||
"few_shot_prompt = FewShotChatMessagePromptTemplate(\n",
|
||||
|
||||
203
docs/docs/how_to/filter_messages.ipynb
Normal file
@@ -0,0 +1,203 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e389175d-8a65-4f0d-891c-dbdfabb3c3ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to filter messages\n",
|
||||
"\n",
|
||||
"In more complex chains and agents we might track state with a list of messages. This list can start to accumulate messages from multiple different models, speakers, sub-chains, etc., and we may only want to pass subsets of this full list of messages to each model call in the chain/agent.\n",
|
||||
"\n",
|
||||
"The `filter_messages` utility makes it easy to filter messages by type, id, or name.\n",
|
||||
"\n",
|
||||
"## Basic usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f4ad2fd3-3cab-40d4-a989-972115865b8b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='example input', name='example_user', id='2'),\n",
|
||||
" HumanMessage(content='real input', name='bob', id='4')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" filter_messages,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\"you are a good assistant\", id=\"1\"),\n",
|
||||
" HumanMessage(\"example input\", id=\"2\", name=\"example_user\"),\n",
|
||||
" AIMessage(\"example output\", id=\"3\", name=\"example_assistant\"),\n",
|
||||
" HumanMessage(\"real input\", id=\"4\", name=\"bob\"),\n",
|
||||
" AIMessage(\"real output\", id=\"5\", name=\"alice\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"filter_messages(messages, include_types=\"human\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "7b663a1e-a8ae-453e-a072-8dd75dfab460",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content='you are a good assistant', id='1'),\n",
|
||||
" HumanMessage(content='real input', name='bob', id='4'),\n",
|
||||
" AIMessage(content='real output', name='alice', id='5')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filter_messages(messages, exclude_names=[\"example_user\", \"example_assistant\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "db170e46-03f8-4710-b967-23c70c3ac054",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='example input', name='example_user', id='2'),\n",
|
||||
" HumanMessage(content='real input', name='bob', id='4'),\n",
|
||||
" AIMessage(content='real output', name='alice', id='5')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filter_messages(messages, include_types=[HumanMessage, AIMessage], exclude_ids=[\"3\"])"
|
||||
]
|
||||
},
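The intro above also mentions filtering by name; here is a minimal sketch of name-based inclusion using the same `messages` list (the expected result noted in the comment is inferred from the messages defined above, not a recorded output):

```python
# Keep only messages whose `name` is in the allow-list.
filter_messages(messages, include_names=["example_user", "bob"])
# Expected: the two HumanMessages named "example_user" and "bob".
```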
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b7c4e5ad-d1b4-4c18-b250-864adde8f0dd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`filter_messages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "675f8f79-db39-401c-a582-1df2478cba30",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=[], response_metadata={'id': 'msg_01Wz7gBHahAwkZ1KCBNtXmwA', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 3}}, id='run-b5d8a3fe-004f-4502-a071-a6c025031827-0', usage_metadata={'input_tokens': 16, 'output_tokens': 3, 'total_tokens': 19})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# pip install -U langchain-anthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
|
||||
"# Notice we don't pass in messages. This creates\n",
|
||||
"# a RunnableLambda that takes messages as input\n",
|
||||
"filter_ = filter_messages(exclude_names=[\"example_user\", \"example_assistant\"])\n",
|
||||
"chain = filter_ | llm\n",
|
||||
"chain.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4133ab28-f49c-480f-be92-b51eb6559153",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace we can see that before the messages are passed to the model they are filtered: https://smith.langchain.com/public/f808a724-e072-438e-9991-657cc9e7e253/r\n",
|
||||
"\n",
|
||||
"Looking at just the filter_, we can see that it's a Runnable object that can be invoked like all Runnables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "c090116a-1fef-43f6-a178-7265dff9db00",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='real input', name='bob', id='4'),\n",
|
||||
" AIMessage(content='real output', name='alice', id='5')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"filter_.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff339066-d424-4042-8cca-cd4b007c1a8e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For a complete description of all arguments head to the API reference: https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.filter_messages.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -167,7 +167,7 @@
|
||||
"source": [
|
||||
"Above, the `@chain` decorator is used to convert `custom_chain` into a runnable, which we invoke with the `.invoke()` method.\n",
|
||||
"\n",
|
||||
"If you are using a tracing with [LangSmith](/docs/langsmith/), you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath.\n",
|
||||
"If you are using a tracing with [LangSmith](https://docs.smith.langchain.com/), you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath.\n",
|
||||
"\n",
|
||||
"## Automatic coercion in chains\n",
|
||||
"\n",
|
||||
@@ -300,7 +300,11 @@
|
||||
"id": "922b48bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Streaming\n",
|
||||
"## Streaming\n",
|
||||
"\n",
|
||||
":::{.callout-note}\n",
|
||||
"[RunnableLambda](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html) is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate on chunks of inputs and yield chunks of outputs), use [RunnableGenerator](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableGenerator.html) instead as in the example below.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a chain.\n",
|
||||
"\n",
|
||||
|
||||
@@ -300,7 +300,7 @@
|
||||
"Entities in the question map to the following database values:\n",
|
||||
"{entities_list}\n",
|
||||
"Question: {question}\n",
|
||||
"Cypher query:\"\"\" # noqa: E501\n",
|
||||
"Cypher query:\"\"\"\n",
|
||||
"\n",
|
||||
"cypher_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
@@ -377,7 +377,7 @@
|
||||
"response_template = \"\"\"Based on the the question, Cypher query, and Cypher response, write a natural language response:\n",
|
||||
"Question: {question}\n",
|
||||
"Cypher query: {query}\n",
|
||||
"Cypher Response: {response}\"\"\" # noqa: E501\n",
|
||||
"Cypher Response: {response}\"\"\"\n",
|
||||
"\n",
|
||||
"response_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
|
||||
@@ -177,14 +177,13 @@
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"\n",
|
||||
"from langchain.callbacks.manager import (\n",
|
||||
"# Import things that are needed generically\n",
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_core.callbacks import (\n",
|
||||
" AsyncCallbackManagerForToolRun,\n",
|
||||
" CallbackManagerForToolRun,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Import things that are needed generically\n",
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain.tools import BaseTool\n",
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"description_query = \"\"\"\n",
|
||||
"MATCH (m:Movie|Person)\n",
|
||||
@@ -227,14 +226,13 @@
|
||||
"source": [
|
||||
"from typing import Optional, Type\n",
|
||||
"\n",
|
||||
"from langchain.callbacks.manager import (\n",
|
||||
"# Import things that are needed generically\n",
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_core.callbacks import (\n",
|
||||
" AsyncCallbackManagerForToolRun,\n",
|
||||
" CallbackManagerForToolRun,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Import things that are needed generically\n",
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain.tools import BaseTool\n",
|
||||
"from langchain_core.tools import BaseTool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class InformationInput(BaseModel):\n",
|
||||
@@ -287,8 +285,8 @@
|
||||
"from langchain.agents import AgentExecutor\n",
|
||||
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
|
||||
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
|
||||
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.messages import AIMessage, HumanMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.utils.function_calling import convert_to_openai_function\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
|
||||
@@ -7,39 +7,42 @@ sidebar_class_name: hidden
|
||||
|
||||
Here you’ll find answers to “How do I….?” types of questions.
|
||||
These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task.
|
||||
For conceptual explanations see [Conceptual Guides](/docs/concepts/).
|
||||
For conceptual explanations see the [Conceptual guide](/docs/concepts/).
|
||||
For end-to-end walkthroughs see [Tutorials](/docs/tutorials).
|
||||
For comprehensive descriptions of every class and function see [API Reference](https://api.python.langchain.com/en/latest/).
|
||||
For comprehensive descriptions of every class and function see the [API Reference](https://api.python.langchain.com/en/latest/).
|
||||
|
||||
## Installation
|
||||
|
||||
- [How to: install LangChain packages](/docs/how_to/installation/)
|
||||
- [How to: use LangChain with different Pydantic versions](/docs/how_to/pydantic_compatibility)
|
||||
|
||||
## Key features
|
||||
|
||||
This highlights functionality that is core to using LangChain.
|
||||
|
||||
- [How to: return structured data from an LLM](/docs/how_to/structured_output/)
|
||||
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
|
||||
- [How to: return structured data from a model](/docs/how_to/structured_output/)
|
||||
- [How to: use a model to call tools](/docs/how_to/tool_calling/)
|
||||
- [How to: stream runnables](/docs/how_to/streaming)
|
||||
- [How to: debug your LLM apps](/docs/how_to/debugging/)
|
||||
|
||||
## LangChain Expression Language (LCEL)
|
||||
|
||||
LangChain Expression Language is a way to create arbitrary custom chains. It is built on the Runnable protocol.
|
||||
[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol.
|
||||
|
||||
[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives.
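For a flavor of what LCEL composition looks like, here is a minimal sketch (the prompt and model are placeholders; any runnable components compose the same way with the `|` operator):

```python
# Minimal LCEL sketch: compose a prompt, chat model, and output parser with `|`.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125") | StrOutputParser()

print(chain.invoke({"topic": "bears"}))
```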
|
||||
|
||||
- [How to: chain runnables](/docs/how_to/sequence)
|
||||
- [How to: stream runnables](/docs/how_to/streaming)
|
||||
- [How to: invoke runnables in parallel](/docs/how_to/parallel/)
|
||||
- [How to: attach runtime arguments to a runnable](/docs/how_to/binding/)
|
||||
- [How to: run custom functions](/docs/how_to/functions)
|
||||
- [How to: pass through arguments from one step to the next](/docs/how_to/passthrough)
|
||||
- [How to: add values to a chain's state](/docs/how_to/assign)
|
||||
- [How to: configure a chain at runtime](/docs/how_to/configure)
|
||||
- [How to: add message history](/docs/how_to/message_history)
|
||||
- [How to: route execution within a chain](/docs/how_to/routing)
|
||||
- [How to: add default invocation args to runnables](/docs/how_to/binding/)
|
||||
- [How to: turn any function into a runnable](/docs/how_to/functions)
|
||||
- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough)
|
||||
- [How to: configure runnable behavior at runtime](/docs/how_to/configure)
|
||||
- [How to: add message history (memory) to a chain](/docs/how_to/message_history)
|
||||
- [How to: route between sub-chains](/docs/how_to/routing)
|
||||
- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/)
|
||||
- [How to: inspect runnables](/docs/how_to/inspect)
|
||||
- [How to: add fallbacks](/docs/how_to/fallbacks)
|
||||
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
|
||||
|
||||
## Components
|
||||
|
||||
@@ -47,7 +50,7 @@ These are the core building blocks you can use when building applications.
|
||||
|
||||
### Prompt templates
|
||||
|
||||
Prompt Templates are responsible for formatting user input into a format that can be passed to a language model.
|
||||
[Prompt Templates](/docs/concepts/#prompt-templates) are responsible for formatting user input into a format that can be passed to a language model.
|
||||
|
||||
- [How to: use few shot examples](/docs/how_to/few_shot_examples)
|
||||
- [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/)
|
||||
@@ -56,7 +59,7 @@ Prompt Templates are responsible for formatting user input into a format that ca
|
||||
|
||||
### Example selectors
|
||||
|
||||
Example Selectors are responsible for selecting the correct few shot examples to pass to the prompt.
|
||||
[Example Selectors](/docs/concepts/#example-selectors) are responsible for selecting the correct few shot examples to pass to the prompt.
|
||||
|
||||
- [How to: use example selectors](/docs/how_to/example_selectors)
|
||||
- [How to: select examples by length](/docs/how_to/example_selectors_length_based)
|
||||
@@ -66,7 +69,7 @@ Example Selectors are responsible for selecting the correct few shot examples to
|
||||
|
||||
### Chat models
|
||||
|
||||
Chat Models are newer forms of language models that take messages in and output a message.
|
||||
[Chat Models](/docs/concepts/#chat-models) are newer forms of language models that take messages in and output a message.
|
||||
|
||||
- [How to: do function/tool calling](/docs/how_to/tool_calling)
|
||||
- [How to: get models to return structured output](/docs/how_to/structured_output)
|
||||
@@ -76,10 +79,19 @@ Chat Models are newer forms of language models that take messages in and output
|
||||
- [How to: stream a response back](/docs/how_to/chat_streaming)
|
||||
- [How to: track token usage](/docs/how_to/chat_token_usage_tracking)
|
||||
- [How to: track response metadata across providers](/docs/how_to/response_metadata)
|
||||
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
|
||||
|
||||
### Messages
|
||||
|
||||
[Messages](/docs/concepts/#messages) are the input and output of chat models. They have some `content` and a `role`, which describes the source of the message.
|
||||
|
||||
- [How to: trim messages](/docs/how_to/trim_messages/)
|
||||
- [How to: filter messages](/docs/how_to/filter_messages/)
|
||||
- [How to: merge consecutive messages of the same type](/docs/how_to/merge_message_runs/)
|
||||
|
||||
### LLMs
|
||||
|
||||
What LangChain calls LLMs are older forms of language models that take a string in and output a string.
|
||||
What LangChain calls [LLMs](/docs/concepts/#llms) are older forms of language models that take a string in and output a string.
|
||||
|
||||
- [How to: cache model responses](/docs/how_to/llm_caching)
|
||||
- [How to: create a custom LLM class](/docs/how_to/custom_llm)
|
||||
@@ -89,7 +101,7 @@ What LangChain calls LLMs are older forms of language models that take a string
|
||||
|
||||
### Output parsers
|
||||
|
||||
Output Parsers are responsible for taking the output of an LLM and parsing into more structured format.
|
||||
[Output Parsers](/docs/concepts/#output-parsers) are responsible for taking the output of an LLM and parsing it into a more structured format.
|
||||
|
||||
- [How to: use output parsers to parse an LLM response into structured format](/docs/how_to/output_parser_structured)
|
||||
- [How to: parse JSON output](/docs/how_to/output_parser_json)
|
||||
@@ -101,7 +113,7 @@ Output Parsers are responsible for taking the output of an LLM and parsing into
|
||||
|
||||
### Document loaders
|
||||
|
||||
Document Loaders are responsible for loading documents from a variety of sources.
|
||||
[Document Loaders](/docs/concepts/#document-loaders) are responsible for loading documents from a variety of sources.
|
||||
|
||||
- [How to: load CSV data](/docs/how_to/document_loader_csv)
|
||||
- [How to: load data from a directory](/docs/how_to/document_loader_directory)
|
||||
@@ -114,7 +126,7 @@ Document Loaders are responsible for loading documents from a variety of sources
|
||||
|
||||
### Text splitters
|
||||
|
||||
Text Splitters take a document and split into chunks that can be used for retrieval.
|
||||
[Text Splitters](/docs/concepts/#text-splitters) take a document and split it into chunks that can be used for retrieval.
|
||||
|
||||
- [How to: recursively split text](/docs/how_to/recursive_text_splitter)
|
||||
- [How to: split by HTML headers](/docs/how_to/HTML_header_metadata_splitter)
|
||||
@@ -128,20 +140,20 @@ Text Splitters take a document and split into chunks that can be used for retrie
|
||||
|
||||
### Embedding models
|
||||
|
||||
Embedding Models take a piece of text and create a numerical representation of it.
|
||||
[Embedding Models](/docs/concepts/#embedding-models) take a piece of text and create a numerical representation of it.
|
||||
|
||||
- [How to: embed text data](/docs/how_to/embed_text)
|
||||
- [How to: cache embedding results](/docs/how_to/caching_embeddings)
|
||||
|
||||
### Vector stores
|
||||
|
||||
Vector stores are databases that can efficiently store and retrieve embeddings.
|
||||
[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings.
|
||||
|
||||
- [How to: use a vector store to retrieve data](/docs/how_to/vectorstores)
|
||||
|
||||
### Retrievers
|
||||
|
||||
Retrievers are responsible for taking a query and returning relevant documents.
|
||||
[Retrievers](/docs/concepts/#retrievers) are responsible for taking a query and returning relevant documents.
|
||||
|
||||
- [How to: use a vector store to retrieve data](/docs/how_to/vectorstore_retriever)
|
||||
- [How to: generate multiple queries to retrieve data for](/docs/how_to/MultiQueryRetriever)
|
||||
@@ -149,7 +161,7 @@ Retrievers are responsible for taking a query and returning relevant documents.
|
||||
- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
|
||||
- [How to: add similarity scores to retriever results](/docs/how_to/add_scores_retriever)
|
||||
- [How to: combine the results from multiple retrievers](/docs/how_to/ensemble_retriever)
|
||||
- [How to: reorder retrieved results to put most relevant documents not in the middle](/docs/how_to/long_context_reorder)
|
||||
- [How to: reorder retrieved results to mitigate the "lost in the middle" effect](/docs/how_to/long_context_reorder)
|
||||
- [How to: generate multiple embeddings per document](/docs/how_to/multi_vector)
|
||||
- [How to: retrieve the whole document for a chunk](/docs/how_to/parent_document_retriever)
|
||||
- [How to: generate metadata filters](/docs/how_to/self_query)
|
||||
@@ -164,19 +176,21 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
|
||||
|
||||
### Tools
|
||||
|
||||
LangChain Tools contain a description of the tool (to pass to the language model) as well as the implementation of the function to call).
|
||||
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.
|
||||
|
||||
- [How to: use LangChain tools](/docs/how_to/tools)
|
||||
- [How to: create custom tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
|
||||
- [How to: use LangChain toolkits](/docs/how_to/toolkits)
|
||||
- [How to: define a custom tool](/docs/how_to/custom_tools)
|
||||
- [How to: convert LangChain tools to OpenAI functions](/docs/how_to/tools_as_openai_functions)
|
||||
- [How to: use tools without function calling](/docs/how_to/tools_prompting)
|
||||
- [How to: let the LLM choose between multiple tools](/docs/how_to/tools_multiple)
|
||||
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
|
||||
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
|
||||
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
|
||||
- [How to: do parallel tool use](/docs/how_to/tools_parallel)
|
||||
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
|
||||
- [How to: call tools using multi-modal data](/docs/how_to/tool_calls_multi_modal)
|
||||
|
||||
### Multimodal
|
||||
|
||||
- [How to: pass multimodal data directly to models](/docs/how_to/multimodal_inputs/)
|
||||
- [How to: use multimodal prompts](/docs/how_to/multimodal_prompts/)
|
||||
|
||||
|
||||
### Agents
|
||||
|
||||
@@ -189,6 +203,16 @@ For in depth how-to guides for agents, please check out [LangGraph](https://gith
|
||||
- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
|
||||
- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)
|
||||
|
||||
### Callbacks
|
||||
|
||||
[Callbacks](/docs/concepts/#callbacks) allow you to hook into the various stages of your LLM application's execution.
|
||||
|
||||
- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime)
|
||||
- [How to: attach callbacks to a module](/docs/how_to/callbacks_attach)
|
||||
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
|
||||
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
|
||||
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
|
||||
|
||||
### Custom
|
||||
|
||||
All LangChain components can easily be extended to support your own versions.
|
||||
@@ -198,8 +222,11 @@ All of LangChain components can easily be extended to support your own versions.
|
||||
- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
|
||||
- [How to: write a custom document loader](/docs/how_to/document_loader_custom)
|
||||
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
|
||||
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
|
||||
- [How to: define a custom tool](/docs/how_to/custom_tools)
|
||||
|
||||
### Serialization
|
||||
- [How to: save and load LangChain objects](/docs/how_to/serialization)
|
||||
|
||||
## Use cases
|
||||
|
||||
@@ -208,6 +235,7 @@ These guides cover use-case specific details.
|
||||
### Q&A with RAG
|
||||
|
||||
Retrieval Augmented Generation (RAG) is a way to connect LLMs to external sources of data.
|
||||
For a high-level tutorial on RAG, check out [this guide](/docs/tutorials/rag/).
|
||||
|
||||
- [How to: add chat history](/docs/how_to/qa_chat_history_how_to/)
|
||||
- [How to: stream](/docs/how_to/qa_streaming/)
|
||||
@@ -219,6 +247,7 @@ Retrieval Augmented Generation (RAG) is a way to connect LLMs to external source
|
||||
### Extraction
|
||||
|
||||
Extraction is when you use LLMs to extract structured information from unstructured text.
|
||||
For a high level tutorial on extraction, check out [this guide](/docs/tutorials/extraction/).
|
||||
|
||||
- [How to: use reference examples](/docs/how_to/extraction_examples/)
|
||||
- [How to: handle long text](/docs/how_to/extraction_long_text/)
|
||||
@@ -227,14 +256,17 @@ Extraction is when you use LLMs to extract structured information from unstructu
|
||||
### Chatbots
|
||||
|
||||
Chatbots involve using an LLM to have a conversation.
|
||||
For a high-level tutorial on building chatbots, check out [this guide](/docs/tutorials/chatbot/).
|
||||
|
||||
- [How to: manage memory](/docs/how_to/chatbots_memory)
|
||||
- [How to: do retrieval](/docs/how_to/chatbots_retrieval)
|
||||
- [How to: use tools](/docs/how_to/chatbots_tools)
|
||||
- [How to: manage large chat history](/docs/how_to/trim_messages/)
|
||||
|
||||
### Query analysis
|
||||
|
||||
Query Analysis is the task of using an LLM to generate a query to send to a retriever.
|
||||
For a high-level tutorial on query analysis, check out [this guide](/docs/tutorials/query_analysis/).
|
||||
|
||||
- [How to: add examples to the prompt](/docs/how_to/query_few_shot)
|
||||
- [How to: handle cases where no queries are generated](/docs/how_to/query_no_queries)
|
||||
@@ -246,6 +278,7 @@ Query Analysis is the task of using an LLM to generate a query to send to a retr
|
||||
### Q&A over SQL + CSV
|
||||
|
||||
You can use LLMs to do question answering over tabular data.
|
||||
For a high-level tutorial, check out [this guide](/docs/tutorials/sql_qa/).
|
||||
|
||||
- [How to: use prompting to improve results](/docs/how_to/sql_prompting)
|
||||
- [How to: do query validation](/docs/how_to/sql_query_checking)
|
||||
@@ -255,8 +288,33 @@ You can use LLMs to do question answering over tabular data.
|
||||
### Q&A over graph databases
|
||||
|
||||
You can use an LLM to do question answering over graph databases.
|
||||
For a high-level tutorial, check out [this guide](/docs/tutorials/graph/).
|
||||
|
||||
- [How to: map values to a database](/docs/how_to/graph_mapping)
|
||||
- [How to: add a semantic layer over the database](/docs/how_to/graph_semantic)
|
||||
- [How to: improve results with prompting](/docs/how_to/graph_prompting)
|
||||
- [How to: construct knowledge graphs](/docs/how_to/graph_constructing)
|
||||
|
||||
## [LangGraph](https://langchain-ai.github.io/langgraph)
|
||||
|
||||
LangGraph is an extension of LangChain aimed at
|
||||
building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
|
||||
LangGraph documentation is currently hosted on a separate site.
|
||||
You can peruse [LangGraph how-to guides here](https://langchain-ai.github.io/langgraph/how-tos/).
|
||||
|
||||
## [LangSmith](https://docs.smith.langchain.com/)
|
||||
|
||||
LangSmith allows you to closely trace, monitor and evaluate your LLM application.
|
||||
It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.
|
||||
|
||||
LangSmith documentation is hosted on a separate site.
|
||||
You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/).
|
||||
|
||||
### Evaluation
|
||||
<span data-heading-keywords="evaluation,evaluate"></span>
|
||||
|
||||
Evaluating performance is a vital part of building LLM-powered applications.
|
||||
LangSmith helps with every step of the process from creating a dataset to defining metrics to running evaluators.
|
||||
|
||||
To learn more, check out the [LangSmith evaluation how-to guides](https://docs.smith.langchain.com/how_to_guides#evaluation).
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
" * document addition by id (`add_documents` method with `ids` argument)\n",
|
||||
" * delete by id (`delete` method with `ids` argument)\n",
|
||||
"\n",
|
||||
"Compatible Vectorstores: `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
|
||||
" \n",
|
||||
"## Caution\n",
|
||||
"\n",
|
||||
@@ -786,7 +786,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.base import BaseLoader\n",
|
||||
"from langchain_core.document_loaders import BaseLoader\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyCustomLoader(BaseLoader):\n",
|
||||
|
||||
@@ -39,9 +39,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
|
||||
@@ -41,7 +41,7 @@ pip install langchain-core
|
||||
```
|
||||
|
||||
## LangChain community
|
||||
The `langchain-community` package contains third-party integrations. It is automatically installed by `langchain`, but can also be used separately. Install with:
|
||||
The `langchain-community` package contains third-party integrations. Install with:
|
||||
|
||||
```bash
|
||||
pip install langchain-community
|
||||
|
||||
1142 docs/docs/how_to/lcel_cheatsheet.ipynb Normal file
File diff suppressed because it is too large
@@ -119,7 +119,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# We can do the same thing with a SQLite cache\n",
|
||||
"from langchain.cache import SQLiteCache\n",
|
||||
"from langchain_community.cache import SQLiteCache\n",
|
||||
"\n",
|
||||
"set_llm_cache(SQLiteCache(database_path=\".langchain.db\"))"
|
||||
]
|
||||
|
||||
@@ -2,169 +2,226 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e5715368",
|
||||
"id": "90dff237-bc28-4185-a2c0-d5203bbdeacd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to track token usage for LLMs\n",
|
||||
"\n",
|
||||
"This notebook goes over how to track your token usage for specific calls. It is currently only implemented for the OpenAI API.\n",
|
||||
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
|
||||
"\n",
|
||||
"Let's first look at an extremely simple example of tracking token usage for a single LLM call."
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [LLMs](/docs/concepts/#llms)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Using LangSmith\n",
|
||||
"\n",
|
||||
"You can use [LangSmith](https://www.langchain.com/langsmith) to help track token usage in your LLM application. See the [LangSmith quick start guide](https://docs.smith.langchain.com/).\n",
|
||||
"\n",
|
||||
"## Using callbacks\n",
|
||||
"\n",
|
||||
"There are some API-specific callback context managers that allow you to track token usage across multiple calls. You'll need to check whether such an integration is available for your particular model.\n",
|
||||
"\n",
|
||||
"If such an integration is not available for your model, you can create a custom callback manager by adapting the implementation of the [OpenAI callback manager](https://api.python.langchain.com/en/latest/_modules/langchain_community/callbacks/openai_info.html#OpenAICallbackHandler).\n",
|
||||
"\n",
|
||||
"### OpenAI\n",
|
||||
"\n",
|
||||
"Let's first look at an extremely simple example of tracking token usage for a single Chat model call.\n",
|
||||
"\n",
|
||||
":::{.callout-danger}\n",
|
||||
"\n",
|
||||
"The callback handler does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). For support in a streaming context, refer to the corresponding guide for chat models [here](/docs/how_to/chat_token_usage_tracking).\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
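For the "adapt the OpenAI callback manager" route mentioned above, a minimal custom handler might look like the following sketch. The `token_usage` keys are assumptions that differ between providers, so treat this as illustrative rather than a drop-in implementation:

```python
# Illustrative sketch of a provider-agnostic token-counting callback handler.
# Assumes the integration populates response.llm_output["token_usage"];
# key names vary by provider, so adjust to what your model actually returns.
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult


class TokenCountingHandler(BaseCallbackHandler):
    def __init__(self) -> None:
        self.total_tokens = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0

    def on_llm_end(self, response: LLMResult, **kwargs) -> None:
        usage = (response.llm_output or {}).get("token_usage", {})
        self.total_tokens += usage.get("total_tokens", 0)
        self.prompt_tokens += usage.get("prompt_tokens", 0)
        self.completion_tokens += usage.get("completion_tokens", 0)


# Hypothetical usage: pass the handler via the callbacks config when invoking.
# handler = TokenCountingHandler()
# llm.invoke("Tell me a joke", config={"callbacks": [handler]})
# print(handler.total_tokens)
```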
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f790edd9-823e-4bc5-befa-e9529c7237a0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Single call"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "9455db35",
|
||||
"id": "2eebbee2-6ca1-4fa8-a3aa-0376888ceefb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Why don't scientists trust atoms?\n",
|
||||
"\n",
|
||||
"Because they make up everything.\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Total Tokens: 18\n",
|
||||
"Prompt Tokens: 4\n",
|
||||
"Completion Tokens: 14\n",
|
||||
"Total Cost (USD): $3.4e-05\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
" print(result)\n",
|
||||
" print(\"---\")\n",
|
||||
"print()\n",
|
||||
"\n",
|
||||
"print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
"print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
"print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
"print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7df3be35-dd97-4e3a-bd51-52434ab2249d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Multiple calls\n",
|
||||
"\n",
|
||||
"Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence to a chain. This will also work for an agent which may use multiple steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d1c55cc9",
|
||||
"id": "3ec10419-294c-44bf-af85-86aabf457cb6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Why did the chicken go to the seance?\n",
|
||||
"\n",
|
||||
"To talk to the other side of the road!\n",
|
||||
"--\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Why did the fish need a lawyer?\n",
|
||||
"\n",
|
||||
"Because it got caught in a net!\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"Total Tokens: 50\n",
|
||||
"Prompt Tokens: 12\n",
|
||||
"Completion Tokens: 38\n",
|
||||
"Total Cost (USD): $9.400000000000001e-05\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"\n",
|
||||
"template = PromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
|
||||
"chain = template | llm\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" response = chain.invoke({\"topic\": \"birds\"})\n",
|
||||
" print(response)\n",
|
||||
" response = chain.invoke({\"topic\": \"fish\"})\n",
|
||||
" print(\"--\")\n",
|
||||
" print(response)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print()\n",
|
||||
"print(\"---\")\n",
|
||||
"print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
"print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
"print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
"print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ad7a3fba-9fac-4222-8f87-d1d276d27d6e",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Streaming\n",
|
||||
"\n",
|
||||
":::{.callout-danger}\n",
|
||||
"\n",
|
||||
"`get_openai_callback` does not currently support streaming token counts for legacy language models (e.g., `langchain_openai.OpenAI`). If you want to count tokens correctly in a streaming context, there are a number of options:\n",
|
||||
"\n",
|
||||
"- Use chat models as described in [this guide](/docs/how_to/chat_token_usage_tracking);\n",
|
||||
"- Implement a [custom callback handler](/docs/how_to/custom_callbacks/) that uses appropriate tokenizers to count the tokens;\n",
|
||||
"- Use a monitoring platform such as [LangSmith](https://www.langchain.com/langsmith).\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Note that when using legacy language models in a streaming context, token counts are not updated:"
|
||||
]
|
||||
},
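For the second option above (counting tokens yourself with a tokenizer), a rough sketch using `tiktoken` could look like this; the model-to-encoding lookup and the resulting counts are approximations, and prompt tokens are not included:

```python
# Approximate completion-token counting for a streamed legacy LLM call.
# Assumes tiktoken recognizes the model name; counts are an estimate only.
import tiktoken
from langchain_openai import OpenAI

llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo-instruct")

completion_tokens = 0
for chunk in llm.stream("Tell me a joke"):
    completion_tokens += len(encoding.encode(chunk))

print(f"Approximate completion tokens: {completion_tokens}")
```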
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "31667d54",
|
||||
"metadata": {},
|
||||
"id": "cd61ed79-7858-49bb-afb5-d41291f597ba",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokens Used: 37\n",
|
||||
"\tPrompt Tokens: 4\n",
|
||||
"\tCompletion Tokens: 33\n",
|
||||
"Successful Requests: 1\n",
|
||||
"Total Cost (USD): $7.2e-05\n"
|
||||
"\n",
|
||||
"\n",
|
||||
"Why don't scientists trust atoms?\n",
|
||||
"\n",
|
||||
"Because they make up everything!\n",
|
||||
"\n",
|
||||
"Why don't scientists trust atoms?\n",
|
||||
"\n",
|
||||
"Because they make up everything.\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Total Tokens: 0\n",
|
||||
"Prompt Tokens: 0\n",
|
||||
"Completion Tokens: 0\n",
|
||||
"Total Cost (USD): $0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
" print(cb)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c0ab6d27",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e09420f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"72\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
" result2 = llm.invoke(\"Tell me a joke\")\n",
|
||||
" print(cb.total_tokens)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d8186e7b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If a chain or agent with multiple steps in it is used, it will track all those steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "5d1125c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2f98c536",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n",
|
||||
"Action: Search\n",
|
||||
"Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m[\"Olivia Wilde and Harry Styles took fans by surprise with their whirlwind romance, which began when they met on the set of Don't Worry Darling.\", 'Olivia Wilde started dating Harry Styles after ending her years-long engagement to Jason Sudeikis — see their relationship timeline.', 'Olivia Wilde and Harry Styles were spotted early on in their relationship walking around London. (. Image ...', \"Looks like Olivia Wilde and Jason Sudeikis are starting 2023 on good terms. Amid their highly publicized custody battle – and the actress' ...\", 'The two started dating after Wilde split up with actor Jason Sudeikisin 2020. However, their relationship came to an end last November.', \"Olivia Wilde and Harry Styles started dating during the filming of Don't Worry Darling. While the movie got a lot of backlash because of the ...\", \"Here's what we know so far about Harry Styles and Olivia Wilde's relationship.\", 'Olivia and the Grammy winner kept their romance out of the spotlight as their relationship began just two months after her split from ex-fiancé ...', \"Harry Styles and Olivia Wilde first met on the set of Don't Worry Darling and stepped out as a couple in January 2021. Relive all their biggest relationship ...\"]\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m Harry Styles is Olivia Wilde's boyfriend.\n",
|
||||
"Action: Search\n",
|
||||
"Action Input: \"Harry Styles age\"\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m29 years\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n",
|
||||
"Action: Calculator\n",
|
||||
"Action Input: 29^0.23\u001b[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3mAnswer: 2.169459462491557\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
|
||||
"Final Answer: Harry Styles is Olivia Wilde's boyfriend and his current age raised to the 0.23 power is 2.169459462491557.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Total Tokens: 2205\n",
|
||||
"Prompt Tokens: 2053\n",
|
||||
"Completion Tokens: 152\n",
|
||||
"Total Cost (USD): $0.0441\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" response = agent.run(\n",
|
||||
" \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\"\n",
|
||||
" )\n",
|
||||
" print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
" print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
" print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
" print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
" for chunk in llm.stream(\"Tell me a joke\"):\n",
|
||||
" print(chunk, end=\"\", flush=True)\n",
|
||||
" print(result)\n",
|
||||
" print(\"---\")\n",
|
||||
"print()\n",
|
||||
"\n",
|
||||
"print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
"print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
"print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
"print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "80ca77a3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -183,7 +240,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -134,8 +134,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"llm = Ollama(\n",
|
||||
" model=\"llama2\", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])\n",
|
||||
@@ -288,9 +287,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_community.llms import LlamaCpp\n",
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler\n",
|
||||
"\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\",\n",
|
||||
|
||||
@@ -5,28 +5,38 @@
|
||||
"id": "fc0db1bc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to reorder retrieved results to put most relevant documents not in the middle\n",
|
||||
"# How to reorder retrieved results to mitigate the \"lost in the middle\" effect\n",
|
||||
"\n",
|
||||
"No matter the architecture of your model, there is a substantial performance degradation when you include 10+ retrieved documents.\n",
|
||||
"In brief: When models must access relevant information in the middle of long contexts, they tend to ignore the provided documents.\n",
|
||||
"See: https://arxiv.org/abs/2307.03172\n",
|
||||
"Substantial performance degradations in [RAG](/docs/tutorials/rag) applications have been [documented](https://arxiv.org/abs/2307.03172) as the number of retrieved documents grows (e.g., beyond ten). In brief: models are liable to miss relevant information in the middle of long contexts.\n",
|
||||
"\n",
|
||||
"To avoid this issue you can re-order documents after retrieval to avoid performance degradation."
|
||||
"By contrast, queries against vector stores will typically return documents in descending order of relevance (e.g., as measured by cosine similarity of [embeddings](/docs/concepts/#embedding-models)).\n",
|
||||
"\n",
|
||||
"To mitigate the [\"lost in the middle\"](https://arxiv.org/abs/2307.03172) effect, you can re-order documents after retrieval such that the most relevant documents are positioned at extrema (e.g., the first and last pieces of context), and the least relevant documents are positioned in the middle. In some cases this can help surface the most relevant information to LLMs.\n",
|
||||
"\n",
|
||||
"The [LongContextReorder](https://api.python.langchain.com/en/latest/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) document transformer implements this re-ordering procedure. Below we demonstrate an example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "74d1ebe8",
|
||||
"id": "2074fdaa-edff-468a-970f-6f5f26e93d4a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet sentence-transformers langchain-chroma langchain langchain-openai langchain-huggingface > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c97eaaf2-34b7-4770-9949-e1abc4ca5226",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we embed some artificial documents and index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store. We will use [Hugging Face](/docs/integrations/text_embedding/huggingfacehub/) embeddings, but any LangChain vector store or embeddings model will suffice."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"id": "49cbcd8e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -45,20 +55,14 @@
|
||||
" Document(page_content='This is just a random text.')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain, StuffDocumentsChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_transformers import (\n",
|
||||
" LongContextReorder,\n",
|
||||
")\n",
|
||||
"from langchain_huggingface import HuggingFaceEmbeddings\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"# Get embeddings.\n",
|
||||
"embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
|
||||
@@ -83,14 +87,22 @@
|
||||
"query = \"What can you tell me about the Celtics?\"\n",
|
||||
"\n",
|
||||
"# Get relevant documents ordered by relevance score\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "175d031a-43fa-42f4-93c4-2ba52c3c3ee5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that documents are returned in descending order of relevance to the query. The `LongContextReorder` document transformer will implement the re-ordering described above:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "34fb9d6e",
|
||||
"execution_count": 3,
|
||||
"id": "9a1181f2-a3dc-4614-9233-2196ab65939e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -108,12 +120,14 @@
|
||||
" Document(page_content='This is a document about the Boston Celtics')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_transformers import LongContextReorder\n",
|
||||
"\n",
|
||||
"# Reorder the documents:\n",
|
||||
"# Less relevant document will be at the middle of the list and more\n",
|
||||
"# relevant elements at beginning / end.\n",
|
||||
@@ -125,58 +139,54 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ceccab87",
|
||||
"cell_type": "markdown",
|
||||
"id": "a8d2ef0c-c397-4d8d-8118-3f7acf86d241",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nThe Celtics are referenced in four of the nine text extracts. They are mentioned as the favorite team of the author, the winner of a basketball game, a team with one of the best players, and a team with a specific player. Additionally, the last extract states that the document is about the Boston Celtics. This suggests that the Celtics are a basketball team, possibly from Boston, that is well-known and has had successful players and games in the past. '"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We prepare and run a custom Stuff chain with reordered docs as context.\n",
|
||||
"\n",
|
||||
"# Override prompts\n",
|
||||
"document_prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"page_content\"], template=\"{page_content}\"\n",
|
||||
")\n",
|
||||
"document_variable_name = \"context\"\n",
|
||||
"llm = OpenAI()\n",
|
||||
"stuff_prompt_override = \"\"\"Given this text extracts:\n",
|
||||
"-----\n",
|
||||
"{context}\n",
|
||||
"-----\n",
|
||||
"Please answer the following question:\n",
|
||||
"{query}\"\"\"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=stuff_prompt_override, input_variables=[\"context\", \"query\"]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Instantiate the chain\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
|
||||
"chain = StuffDocumentsChain(\n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" document_prompt=document_prompt,\n",
|
||||
" document_variable_name=document_variable_name,\n",
|
||||
")\n",
|
||||
"chain.run(input_documents=reordered_docs, query=query)"
|
||||
"Below, we show how to incorporate the re-ordered documents into a simple question-answering chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d4696a97",
|
||||
"execution_count": 5,
|
||||
"id": "8bbea705-d5b9-4ed5-9957-e12547283622",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"The Celtics are a professional basketball team and one of the most iconic franchises in the NBA. They are highly regarded and have a large fan base. The team has had many successful seasons and is often considered one of the top teams in the league. They have a strong history and have produced many great players, such as Larry Bird and L. Kornet. The team is based in Boston and is often referred to as the Boston Celtics.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI()\n",
|
||||
"\n",
|
||||
"prompt_template = \"\"\"\n",
|
||||
"Given these texts:\n",
|
||||
"-----\n",
|
||||
"{context}\n",
|
||||
"-----\n",
|
||||
"Please answer the following question:\n",
|
||||
"{query}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=prompt_template,\n",
|
||||
" input_variables=[\"context\", \"query\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Create and invoke the chain:\n",
|
||||
"chain = create_stuff_documents_chain(llm, prompt)\n",
|
||||
"response = chain.invoke({\"context\": reordered_docs, \"query\": query})\n",
|
||||
"print(response)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -195,7 +205,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
170 docs/docs/how_to/merge_message_runs.ipynb Normal file
@@ -0,0 +1,170 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac47bfab-0f4f-42ce-8bb6-898ef22a0338",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to merge consecutive messages of the same type\n",
|
||||
"\n",
|
||||
"Certain models do not support passing in consecutive messages of the same type (a.k.a. \"runs\" of the same message type).\n",
|
||||
"\n",
|
||||
"The `merge_message_runs` utility makes it easy to merge consecutive messages of the same type.\n",
|
||||
"\n",
|
||||
"## Basic usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1a215bbb-c05c-40b0-a6fd-d94884d517df",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\")\n",
|
||||
"\n",
|
||||
"HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'])\n",
|
||||
"\n",
|
||||
"AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" merge_message_runs,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(\"you're a good assistant.\"),\n",
|
||||
" SystemMessage(\"you always respond with a joke.\"),\n",
|
||||
" HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}]),\n",
|
||||
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
|
||||
" AIMessage(\n",
|
||||
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
|
||||
" ),\n",
|
||||
" AIMessage(\"Why, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"merged = merge_message_runs(messages)\n",
|
||||
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0544c811-7112-4b76-8877-cc897407c738",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character."
|
||||
]
|
||||
},
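A minimal standalone sketch of that merging behavior, using only the message classes and `merge_message_runs` imported in the cell above (the merged representations follow the outputs shown in this notebook):

```python
from langchain_core.messages import AIMessage, HumanMessage, merge_message_runs

# One message with block content and one with string content:
# the merged message keeps a list of content blocks, with the string appended.
merged = merge_message_runs(
    [
        HumanMessage([{"type": "text", "text": "hello"}]),
        HumanMessage("world"),
    ]
)
print(merged[0].content)
# [{'type': 'text', 'text': 'hello'}, 'world']

# Two string-content messages of the same type: contents joined with a newline.
merged = merge_message_runs([AIMessage("first"), AIMessage("second")])
print(repr(merged[0].content))
# 'first\nsecond'
```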
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b2eee74-71c8-4168-b968-bca580c25d18",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`merge_message_runs` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6d5a0283-11f8-435b-b27b-7b18f7693592",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=[], response_metadata={'id': 'msg_01D6R8Naum57q8qBau9vLBUX', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-ac0c465b-b54f-4b8b-9295-e5951250d653-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# pip install -U langchain-anthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
|
||||
"# Notice we don't pass in messages. This creates\n",
|
||||
"# a RunnableLambda that takes messages as input\n",
|
||||
"merger = merge_message_runs()\n",
|
||||
"chain = merger | llm\n",
|
||||
"chain.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72e90dce-693c-4842-9526-ce6460fe956b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Looking at the LangSmith trace we can see that before the messages are passed to the model they are merged: https://smith.langchain.com/public/ab558677-cac9-4c59-9066-1ecce5bcd87c/r\n",
|
||||
"\n",
|
||||
"Looking at just the merger, we can see that it's a Runnable object that can be invoked like all Runnables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "460817a6-c327-429d-958e-181a8c46059c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\"),\n",
|
||||
" HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways']),\n",
|
||||
" AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"merger.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4548d916-ce21-4dc6-8f19-eedb8003ace6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For a complete description of all arguments head to the API reference: https://api.python.langchain.com/en/latest/messages/langchain_core.messages.utils.merge_message_runs.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -344,7 +344,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
|
||||
"from langchain.memory import ChatMessageHistory\n",
|
||||
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
|
||||
@@ -5,33 +5,36 @@
|
||||
"id": "d9172545",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use the MultiVector Retriever\n",
|
||||
"# How to retrieve using multiple vectors per document\n",
|
||||
"\n",
|
||||
"It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n",
|
||||
"It can often be useful to store multiple vectors per document. There are multiple use cases where this is beneficial. For example, we can embed multiple chunks of a document and associate those embeddings with the parent document, allowing retriever hits on the chunks to return the larger document.\n",
|
||||
"\n",
|
||||
"LangChain implements a base [MultiVectorRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n",
|
||||
"\n",
|
||||
"The methods to create multiple vectors per document include:\n",
|
||||
"\n",
|
||||
"- Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever).\n",
|
||||
"- Smaller chunks: split a document into smaller chunks, and embed those (this is [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html)).\n",
|
||||
"- Summary: create a summary for each document, embed that along with (or instead of) the document.\n",
|
||||
"- Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document.\n",
|
||||
"\n",
|
||||
"Note that this also enables another method of adding embeddings - manually. This is useful because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control.\n",
|
||||
"\n",
|
||||
"Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control."
|
||||
"Below we walk through an example. First we instantiate some documents. We will index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store using [OpenAI](https://python.langchain.com/v0.2/docs/integrations/text_embedding/openai/) embeddings, but any LangChain vector store or embeddings model will suffice."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "09cecd95-3499-465a-895a-944627ffb77f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-chroma langchain langchain-openai > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "eed469be",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "18c1421a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -40,25 +43,22 @@
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6d869496",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"loaders = [\n",
|
||||
" TextLoader(\"../../paul_graham_essay.txt\"),\n",
|
||||
" TextLoader(\"paul_graham_essay.txt\"),\n",
|
||||
" TextLoader(\"state_of_the_union.txt\"),\n",
|
||||
"]\n",
|
||||
"docs = []\n",
|
||||
"for loader in loaders:\n",
|
||||
" docs.extend(loader.load())\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)\n",
|
||||
"docs = text_splitter.split_documents(docs)"
|
||||
"docs = text_splitter.split_documents(docs)\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -68,52 +68,54 @@
|
||||
"source": [
|
||||
"## Smaller chunks\n",
|
||||
"\n",
|
||||
"Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. Here we show what is going on under the hood."
|
||||
"Oftentimes it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the [ParentDocumentRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html) does. Here we show what is going on under the hood.\n",
|
||||
"\n",
|
||||
"We will make a distinction between the vector store, which indexes embeddings of the (sub) documents, and the document store, which houses the \"parent\" documents and associates them with an identifier."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 2,
|
||||
"id": "0e7b6b45",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"\n",
|
||||
"# The storage layer for the parent documents\n",
|
||||
"store = InMemoryByteStore()\n",
|
||||
"id_key = \"doc_id\"\n",
|
||||
"\n",
|
||||
"# The retriever (empty to start)\n",
|
||||
"retriever = MultiVectorRetriever(\n",
|
||||
" vectorstore=vectorstore,\n",
|
||||
" byte_store=store,\n",
|
||||
" id_key=id_key,\n",
|
||||
")\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "72a36491",
|
||||
"cell_type": "markdown",
|
||||
"id": "d4feded4-856a-4282-91c3-53aabc62e6ff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The splitter to use to create smaller chunks\n",
|
||||
"child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)"
|
||||
"We next generate the \"sub\" documents by splitting the original documents. Note that we store the document identifier in the `metadata` of the corresponding [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 3,
|
||||
"id": "5d23247d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# The splitter to use to create smaller chunks\n",
|
||||
"child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n",
|
||||
"\n",
|
||||
"sub_docs = []\n",
|
||||
"for i, doc in enumerate(docs):\n",
|
||||
" _id = doc_ids[i]\n",
|
||||
@@ -123,9 +125,17 @@
|
||||
" sub_docs.extend(_sub_docs)"
|
||||
]
|
||||
},
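The hunk above elides the middle of that loop. A plausible sketch of the full splitting step, under the assumption (consistent with the surrounding text) that each sub-document is split from its parent and tagged with the parent's id in `metadata`:

```python
# The splitter used to create the smaller "sub" documents
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)

sub_docs = []
for i, doc in enumerate(docs):
    _id = doc_ids[i]
    # Split the parent document and tag each piece with the parent's id
    _sub_docs = child_text_splitter.split_documents([doc])
    for _doc in _sub_docs:
        _doc.metadata[id_key] = _id
    sub_docs.extend(_sub_docs)
```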
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8e0634f8-90d5-4250-981a-5257c8a6d455",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, we index the documents in our vector store and document store:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 4,
|
||||
"id": "92ed5861",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -134,31 +144,46 @@
|
||||
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "14c48c6d-850c-4317-9b6e-1ade92f2f710",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The vector store alone will retrieve small chunks:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"id": "8afed60c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '2fd77862-9ed5-4fad-bf76-e487b747b333', 'source': 'state_of_the_union.txt'})"
|
||||
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '064eca46-a4c4-4789-8e3b-583f9597e54f', 'source': 'state_of_the_union.txt'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Vectorstore alone retrieves the small chunks\n",
|
||||
"retriever.vectorstore.similarity_search(\"justice breyer\")[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "717097c7-61d9-4306-8625-ef8f1940c127",
|
||||
"metadata": {},
|
||||
"Notice that if the content of one of the messages to merge is a list of content blocks, then the merged message will have a list of content blocks. And if both messages to merge have string contents, then those are concatenated with a newline character."
|
||||
"Whereas the retriever will return the larger parent document:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 6,
|
||||
"id": "3c9017f1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -168,14 +193,13 @@
|
||||
"9875"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Retriever returns larger chunks\n",
|
||||
"len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)"
|
||||
"len(retriever.invoke(\"justice breyer\")[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -183,12 +207,12 @@
|
||||
"id": "cdef8339-f9fa-4b3b-955f-ad9dbdf2734f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The default search type the retriever performs on the vector database is a similarity search. LangChain Vector Stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search) so if you want this instead you can just set the `search_type` property as follows:"
|
||||
"The default search type the retriever performs on the vector database is a similarity search. LangChain vector stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search). This can be controlled via the `search_type` parameter of the retriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 7,
|
||||
"id": "36739460-a737-4a8e-b70f-50bf8c8eaae7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -198,7 +222,7 @@
|
||||
"9875"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -208,7 +232,7 @@
|
||||
"\n",
|
||||
"retriever.search_type = SearchType.mmr\n",
|
||||
"\n",
|
||||
"len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)"
|
||||
"len(retriever.invoke(\"justice breyer\")[0].page_content)"
|
||||
]
|
||||
},
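The import used in that cell is elided by the hunk. For completeness, a standalone sketch of the MMR switch, assuming `SearchType` is importable from `langchain.retrievers.multi_vector`:

```python
from langchain.retrievers.multi_vector import SearchType

# Switch the retriever from similarity search to max marginal relevance
retriever.search_type = SearchType.mmr

# The retriever still returns the larger parent document
print(len(retriever.invoke("justice breyer")[0].page_content))
```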
|
||||
{
|
||||
@@ -216,14 +240,37 @@
|
||||
"id": "d6a7ae0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Summary\n",
|
||||
"## Associating summaries with a document for retrieval\n",
|
||||
"\n",
|
||||
"Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those."
|
||||
"A summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those.\n",
|
||||
"\n",
|
||||
"We construct a simple [chain](/docs/how_to/sequence) that will receive an input [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) object and generate a summary using an LLM.\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 8,
|
||||
"id": "6589291f-55bb-4e9a-b4ff-08f2506ed641",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "1433dff4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -233,27 +280,26 @@
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "35b30390",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"doc\": lambda x: x.page_content}\n",
|
||||
" | ChatPromptTemplate.from_template(\"Summarize the following document:\\n\\n{doc}\")\n",
|
||||
" | ChatOpenAI(max_retries=0)\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3faa9fde-1b09-4849-a815-8b2e89c30a02",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we can [batch](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain across documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 10,
|
||||
"id": "41a2a738",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -261,9 +307,17 @@
|
||||
"summaries = chain.batch(docs, {\"max_concurrency\": 5})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73ef599e-140b-4905-8b62-6c52cdde1852",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can then initialize a `MultiVectorRetriever` as before, indexing the summaries in our vector store, and retaining the original documents in our document store:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 11,
|
||||
"id": "7ac5e4b1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -279,29 +333,13 @@
|
||||
" byte_store=store,\n",
|
||||
" id_key=id_key,\n",
|
||||
")\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "0d93309f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]\n",
|
||||
"\n",
|
||||
"summary_docs = [\n",
|
||||
" Document(page_content=s, metadata={id_key: doc_ids[i]})\n",
|
||||
" for i, s in enumerate(summaries)\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "6d5edf0d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"]\n",
|
||||
"\n",
|
||||
"retriever.vectorstore.add_documents(summary_docs)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
|
||||
]
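The hunk above hides the construction of the summary retriever itself. A sketch of the full cell, assuming the same pattern as the earlier "smaller chunks" setup (the `"summaries"` collection name and the `InMemoryByteStore` import path are assumptions here):

```python
import uuid

from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

# The vector store used to index the summaries (collection name assumed)
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryByteStore()
id_key = "doc_id"

retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=store,
    id_key=id_key,
)

doc_ids = [str(uuid.uuid4()) for _ in docs]

summary_docs = [
    Document(page_content=s, metadata={id_key: doc_ids[i]})
    for i, s in enumerate(summaries)
]

retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
```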
|
||||
@@ -320,50 +358,48 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "299232d6",
|
||||
"cell_type": "markdown",
|
||||
"id": "f0274892-29c1-4616-9040-d23f9d537526",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
|
||||
"Querying the vector store will return summaries:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "10e404c0",
|
||||
"execution_count": 12,
|
||||
"id": "299232d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content=\"The document is a speech given by President Biden addressing various issues and outlining his agenda for the nation. He highlights the importance of nominating a Supreme Court justice and introduces his nominee, Judge Ketanji Brown Jackson. He emphasizes the need to secure the border and reform the immigration system, including providing a pathway to citizenship for Dreamers and essential workers. The President also discusses the protection of women's rights, including access to healthcare and the right to choose. He calls for the passage of the Equality Act to protect LGBTQ+ rights. Additionally, President Biden discusses the need to address the opioid epidemic, improve mental health services, support veterans, and fight against cancer. He expresses optimism for the future of America and the strength of the American people.\", metadata={'doc_id': '56345bff-3ead-418c-a4ff-dff203f77474'})"
|
||||
"Document(page_content=\"President Biden recently nominated Judge Ketanji Brown Jackson to serve on the United States Supreme Court, emphasizing her qualifications and broad support. The President also outlined a plan to secure the border, fix the immigration system, protect women's rights, support LGBTQ+ Americans, and advance mental health services. He highlighted the importance of bipartisan unity in passing legislation, such as the Violence Against Women Act. The President also addressed supporting veterans, particularly those impacted by exposure to burn pits, and announced plans to expand benefits for veterans with respiratory cancers. Additionally, he proposed a plan to end cancer as we know it through the Cancer Moonshot initiative. President Biden expressed optimism about the future of America and emphasized the strength of the American people in overcoming challenges.\", metadata={'doc_id': '84015b1b-980e-400a-94d8-cf95d7e079bd'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")\n",
|
||||
"\n",
|
||||
"sub_docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "e4cce5c2",
|
||||
"cell_type": "markdown",
|
||||
"id": "e4f77ac5-2926-4f60-aad5-b2067900dff9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
|
||||
"Whereas the retriever will return the larger source document:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "c8570dbb",
|
||||
"execution_count": 13,
|
||||
"id": "e4cce5c2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -372,12 +408,14 @@
|
||||
"9194"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.invoke(\"justice breyer\")\n",
|
||||
"\n",
|
||||
"len(retrieved_docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
@@ -388,42 +426,28 @@
|
||||
"source": [
|
||||
"## Hypothetical Queries\n",
|
||||
"\n",
|
||||
"An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document. These questions can then be embedded"
|
||||
"An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document, which might bear close semantic similarity to relevant queries in a [RAG](/docs/tutorials/rag) application. These questions can then be embedded and associated with the documents to improve retrieval.\n",
|
||||
"\n",
|
||||
"Below, we use the [with_structured_output](/docs/how_to/structured_output/) method to structure the LLM output into a list of strings."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "5219b085",
|
||||
"execution_count": 16,
|
||||
"id": "03d85234-c33a-4a43-861d-47328e1ec2ea",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"functions = [\n",
|
||||
" {\n",
|
||||
" \"name\": \"hypothetical_questions\",\n",
|
||||
" \"description\": \"Generate hypothetical questions\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"questions\": {\n",
|
||||
" \"type\": \"array\",\n",
|
||||
" \"items\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"questions\"],\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "523deb92",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class HypotheticalQuestions(BaseModel):\n",
|
||||
" \"\"\"Generate hypothetical questions.\"\"\"\n",
|
||||
"\n",
|
||||
" questions: List[str] = Field(..., description=\"List of questions\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"doc\": lambda x: x.page_content}\n",
|
||||
@@ -431,28 +455,36 @@
|
||||
" | ChatPromptTemplate.from_template(\n",
|
||||
" \"Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\\n\\n{doc}\"\n",
|
||||
" )\n",
|
||||
" | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(\n",
|
||||
" functions=functions, function_call={\"name\": \"hypothetical_questions\"}\n",
|
||||
" | ChatOpenAI(max_retries=0, model=\"gpt-4o\").with_structured_output(\n",
|
||||
" HypotheticalQuestions\n",
|
||||
" )\n",
|
||||
" | JsonKeyOutputFunctionsParser(key_name=\"questions\")\n",
|
||||
" | (lambda x: x.questions)\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6dddc40f-62af-413c-b944-f94a5e1f2f4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Invoking the chain on a single document demonstrates that it outputs a list of questions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": 17,
|
||||
"id": "11d30554",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[\"What was the author's first experience with programming like?\",\n",
|
||||
" 'Why did the author switch their focus from AI to Lisp during their graduate studies?',\n",
|
||||
" 'What led the author to contemplate a career in art instead of computer science?']"
|
||||
"[\"What impact did the IBM 1401 have on the author's early programming experiences?\",\n",
|
||||
" \"How did the transition from using the IBM 1401 to microcomputers influence the author's programming journey?\",\n",
|
||||
" \"What role did Lisp play in shaping the author's understanding and approach to AI?\"]"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -462,22 +494,24 @@
|
||||
]
|
||||
},
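The source of that cell is elided by the hunk; presumably it simply invokes the chain on a single document, along the lines of this sketch (assuming the `chain` and `docs` defined above are in scope):

```python
chain.invoke(docs[0])
```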
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "3eb2e48c",
|
||||
"cell_type": "markdown",
|
||||
"id": "dcffc572-7b20-4b77-857a-90ec360a8f7e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5})"
|
||||
"We can then batch the chain over all documents and assemble our vector store and document store as before:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 18,
|
||||
"id": "b2cd6e75",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Batch chain over documents to generate hypothetical questions\n",
|
||||
"hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5})\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# The vectorstore to use to index the child chunks\n",
|
||||
"vectorstore = Chroma(\n",
|
||||
" collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings()\n",
|
||||
@@ -491,82 +525,67 @@
|
||||
" byte_store=store,\n",
|
||||
" id_key=id_key,\n",
|
||||
")\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "18831b3b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in docs]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Generate Document objects from hypothetical questions\n",
|
||||
"question_docs = []\n",
|
||||
"for i, question_list in enumerate(hypothetical_questions):\n",
|
||||
" question_docs.extend(\n",
|
||||
" [Document(page_content=s, metadata={id_key: doc_ids[i]}) for s in question_list]\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "224b24c5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retriever.vectorstore.add_documents(question_docs)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "7b442b90",
|
||||
"cell_type": "markdown",
|
||||
"id": "75cba8ab-a06f-4545-85fc-cf49d0204b5e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
|
||||
"Note that querying the underlying vector store will retrieve hypothetical questions that are semantically similar to the input query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "089b5ad0",
|
||||
"execution_count": 19,
|
||||
"id": "7b442b90",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Who has been nominated to serve on the United States Supreme Court?', metadata={'doc_id': '0b3a349e-c936-4e77-9c40-0a39fc3e07f0'}),\n",
|
||||
" Document(page_content=\"What was the context and content of Robert Morris' advice to the document's author in 2010?\", metadata={'doc_id': 'b2b2cdca-988a-4af1-ba47-46170770bc8c'}),\n",
|
||||
" Document(page_content='How did personal circumstances influence the decision to pass on the leadership of Y Combinator?', metadata={'doc_id': 'b2b2cdca-988a-4af1-ba47-46170770bc8c'}),\n",
|
||||
" Document(page_content='What were the reasons for the author leaving Yahoo in the summer of 1999?', metadata={'doc_id': 'ce4f4981-ca60-4f56-86f0-89466de62325'})]"
|
||||
"[Document(page_content='What might be the potential benefits of nominating Circuit Court of Appeals Judge Ketanji Brown Jackson to the United States Supreme Court?', metadata={'doc_id': '43292b74-d1b8-4200-8a8b-ea0cb57fbcdb'}),\n",
|
||||
" Document(page_content='How might the Bipartisan Infrastructure Law impact the economic competition between the U.S. and China?', metadata={'doc_id': '66174780-d00c-4166-9791-f0069846e734'}),\n",
|
||||
" Document(page_content='What factors led to the creation of Y Combinator?', metadata={'doc_id': '72003c4e-4cc9-4f09-a787-0b541a65b38c'}),\n",
|
||||
" Document(page_content='How did the ability to publish essays online change the landscape for writers and thinkers?', metadata={'doc_id': 'e8d2c648-f245-4bcc-b8d3-14e64a164b64'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 30,
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sub_docs = retriever.vectorstore.similarity_search(\"justice breyer\")\n",
|
||||
"\n",
|
||||
"sub_docs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "7594b24e",
|
||||
"cell_type": "markdown",
|
||||
"id": "63c32e43-5f4a-463b-a0c2-2101986f70e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
|
||||
"And invoking the retriever will return the corresponding document:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "4c120c65",
|
||||
"execution_count": 20,
|
||||
"id": "7594b24e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -575,22 +594,15 @@
|
||||
"9194"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retrieved_docs = retriever.invoke(\"justice breyer\")\n",
|
||||
"len(retrieved_docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "005072b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -609,7 +621,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
228
docs/docs/how_to/multimodal_inputs.ipynb
Normal file
@@ -0,0 +1,228 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4facdf7f-680e-4d28-908b-2b8408e2a741",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to pass multimodal data directly to models\n",
|
||||
"\n",
|
||||
"Here we demonstrate how to pass multimodal input directly to models. \n",
|
||||
"We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision).\n",
|
||||
"For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n",
|
||||
"\n",
|
||||
"In this example we will ask a model to describe an image."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "fb896ce9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4fca4da7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The most commonly supported way to pass in an image is to pass it in as a byte string.\n",
|
||||
"This should work for most model integrations."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "9ca1040c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"\n",
|
||||
"import httpx\n",
|
||||
"\n",
|
||||
"image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "ec680b6b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message = HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_data}\"},\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
")\n",
|
||||
"response = model.invoke([message])\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8656018e-c56d-47d2-b2be-71e87827f90a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can feed the image URL directly in a content block of type \"image_url\". Note that only some model providers support this."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered clouds, suggesting good visibility and a likely pleasant temperature. The bright sunlight is casting distinct shadows on the grass and vegetation, indicating it is likely daytime, possibly late morning or early afternoon. The overall ambiance suggests a warm and inviting day, suitable for outdoor activities.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message = HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
|
||||
" {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
|
||||
" ],\n",
|
||||
")\n",
|
||||
"response = model.invoke([message])\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c470309",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also pass in multiple images."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "325fb4ca",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Yes, the two images are the same. They both depict a wooden boardwalk extending through a grassy field under a blue sky with light clouds. The scenery, lighting, and composition are identical.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message = HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"are these two images the same?\"},\n",
|
||||
" {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
|
||||
" {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
|
||||
" ],\n",
|
||||
")\n",
|
||||
"response = model.invoke([message])\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "71bd28cf-d76c-44e2-a55e-c5f265db986e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool calls\n",
|
||||
"\n",
|
||||
"Some multimodal models support [tool calling](/docs/concepts/#functiontool-calling) features as well. To call tools using such models, simply bind tools to them in the [usual way](/docs/how_to/tool_calling), and invoke the model using content blocks of the desired type (e.g., containing image data)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "cd22ea82-2f93-46f9-9f7a-6aaf479fcaa9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_BSX4oq4SKnLlp2WlzDhToHBr'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Literal\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def weather_tool(weather: Literal[\"sunny\", \"cloudy\", \"rainy\"]) -> None:\n",
|
||||
" \"\"\"Describe the weather\"\"\"\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"model_with_tools = model.bind_tools([weather_tool])\n",
|
||||
"\n",
|
||||
"message = HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
|
||||
" {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
|
||||
" ],\n",
|
||||
")\n",
|
||||
"response = model_with_tools.invoke([message])\n",
|
||||
"print(response.tool_calls)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
189
docs/docs/how_to/multimodal_prompts.ipynb
Normal file
@@ -0,0 +1,189 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4facdf7f-680e-4d28-908b-2b8408e2a741",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use multimodal prompts\n",
|
||||
"\n",
|
||||
"Here we demonstrate how to use prompt templates to format multimodal inputs to models. \n",
|
||||
"\n",
|
||||
"In this example we will ask a model to describe an image."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"\n",
|
||||
"import httpx\n",
|
||||
"\n",
|
||||
"image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n",
|
||||
"image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2671f995",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "4ee35e4f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"Describe the image provided\"),\n",
|
||||
" (\n",
|
||||
" \"user\",\n",
|
||||
" [\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data}\"},\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "089f75c2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = prompt | model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "02744b06",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The image depicts a sunny day with a beautiful blue sky filled with scattered white clouds. The sky has varying shades of blue, ranging from a deeper hue near the horizon to a lighter, almost pale blue higher up. The white clouds are fluffy and scattered across the expanse of the sky, creating a peaceful and serene atmosphere. The lighting and cloud patterns suggest pleasant weather conditions, likely during the daytime hours on a mild, sunny day in an outdoor natural setting.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = chain.invoke({\"image_data\": image_data})\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9b9ebf6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also pass in multiple images."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "02190ee3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"compare the two pictures provided\"),\n",
|
||||
" (\n",
|
||||
" \"user\",\n",
|
||||
" [\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data1}\"},\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data2}\"},\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "42af057b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = prompt | model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "513abe00",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The two images provided are identical. Both images feature a wooden boardwalk path extending through a lush green field under a bright blue sky with some clouds. The perspective, colors, and elements in both images are exactly the same.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = chain.invoke({\"image_data1\": image_data, \"image_data2\": image_data})\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ea8152c3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -23,7 +23,7 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain.output_parsers import PydanticOutputParser\n",
|
||||
"from langchain_core.output_parsers import PydanticOutputParser\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
|
||||
Some files were not shown because too many files have changed in this diff