Mirror of https://github.com/hwchase17/langchain.git — synced 2026-01-23 21:31:02 +00:00

Compare commits: dev2049/do...v0.0.337 (3816 commits)
[Commit table (Author | SHA1 | Date): only the SHA1 column is recoverable — 3816 commits, beginning with e584b28c54 and ending with 52b016920c; all Author and Date cells are empty.]
|
|
695e7027e6 | ||
|
|
930e319ca7 | ||
|
|
6aa66fd2b0 | ||
|
|
8afc8e6f5d | ||
|
|
f891f7d69f | ||
|
|
83cf01683e | ||
|
|
607708a411 | ||
|
|
75aa408f10 | ||
|
|
0dc700eebf | ||
|
|
d6541da161 | ||
|
|
d669b9ece9 | ||
|
|
e533da8bf2 | ||
|
|
836d2009cb | ||
|
|
d65b1951bd | ||
|
|
265f05b10e | ||
|
|
47e7d09dff | ||
|
|
79b59a8e06 | ||
|
|
6711854e30 | ||
|
|
cab7d86f23 | ||
|
|
3ae11b7582 | ||
|
|
a2f191a322 | ||
|
|
61938a02a1 | ||
|
|
ecee4d6e92 | ||
|
|
fa55c5a16b | ||
|
|
8a7c95e555 | ||
|
|
e4459e423b | ||
|
|
4c1c05c2c7 | ||
|
|
30d8d1d3d0 | ||
|
|
9abf1847f4 | ||
|
|
7d92e9407b | ||
|
|
e288410e72 | ||
|
|
26409b01bd | ||
|
|
6f358bb04a | ||
|
|
6eff0fa2ca | ||
|
|
81e5b1ad36 | ||
|
|
baf48d3583 | ||
|
|
8045870a0f | ||
|
|
db98c44f8f | ||
|
|
7cd0936b1c | ||
|
|
38f853dfa3 | ||
|
|
ee1d488c03 | ||
|
|
6666e422c6 | ||
|
|
8410c6a747 | ||
|
|
7b585c7585 | ||
|
|
6fc24743b7 | ||
|
|
79fb90aafd | ||
|
|
1415966d64 | ||
|
|
a94c4cca68 | ||
|
|
e18e838aae | ||
|
|
e27ba9d92b | ||
|
|
39e685b80f | ||
|
|
bf9e4ef35f | ||
|
|
9cfb311ecb | ||
|
|
405865c91a | ||
|
|
c9f696f063 | ||
|
|
e8531769f7 | ||
|
|
2984803597 | ||
|
|
da69a6771f | ||
|
|
b39017dc11 | ||
|
|
898087d02c | ||
|
|
0ad984fa27 | ||
|
|
81eebc4070 | ||
|
|
5585607654 | ||
|
|
265c285057 | ||
|
|
6631fd5168 | ||
|
|
696886f397 | ||
|
|
0b69a7e9ab | ||
|
|
9ca4c54428 | ||
|
|
dfa48dc3b5 | ||
|
|
04001ff077 | ||
|
|
3f9744c9f4 | ||
|
|
fd3f8efec7 | ||
|
|
490fcf9d98 | ||
|
|
c8f8b1b327 | ||
|
|
e49abd1277 | ||
|
|
fad2c7e5e0 | ||
|
|
98dbea6310 | ||
|
|
d0c7f7c317 | ||
|
|
719316e84c | ||
|
|
e2d61ab85a | ||
|
|
16fbd528c5 | ||
|
|
80e86b602e | ||
|
|
c669d98693 | ||
|
|
1cdb33a090 | ||
|
|
a081e419a0 | ||
|
|
be93775ebc | ||
|
|
60b05511d3 | ||
|
|
200be43da6 | ||
|
|
6d15854cda | ||
|
|
153b56d19b | ||
|
|
1feac83323 | ||
|
|
77ae8084a0 | ||
|
|
e41b382e1c | ||
|
|
5a45363954 | ||
|
|
7acd524210 | ||
|
|
9dc77614e3 | ||
|
|
e5f6f0ffc4 | ||
|
|
052c797429 | ||
|
|
dc2264619a | ||
|
|
6a64870ea0 | ||
|
|
7ebb76a5fa | ||
|
|
8d2281a8ca | ||
|
|
3bfe7cf467 | ||
|
|
556c425042 | ||
|
|
0498dad562 | ||
|
|
59697b406d | ||
|
|
aa37b10b28 | ||
|
|
b0859c9b18 | ||
|
|
a5b206caf3 | ||
|
|
b26cca8008 | ||
|
|
e4625846e5 | ||
|
|
e3b7effc8f | ||
|
|
1ce9ef3828 | ||
|
|
eb180e321f | ||
|
|
64039b9f11 | ||
|
|
13c62cf6b1 | ||
|
|
8c73037dff | ||
|
|
8f5eca236f | ||
|
|
60b0d6ea35 | ||
|
|
521c6f0233 | ||
|
|
bd6a0ee9e9 | ||
|
|
f780678910 | ||
|
|
73831ef3d8 | ||
|
|
7d8830f707 | ||
|
|
0f6737735d | ||
|
|
e9950392dd | ||
|
|
429f4dbe4d | ||
|
|
76d03f398d | ||
|
|
5861770a53 | ||
|
|
140ba682f1 | ||
|
|
9ca1cf003c | ||
|
|
20c6ade2fc | ||
|
|
6157bdf9d9 | ||
|
|
1c66aa6d56 | ||
|
|
0ba175e13f | ||
|
|
75fb9d2fdc | ||
|
|
f5663603cf | ||
|
|
be164b20d8 | ||
|
|
8502117f62 | ||
|
|
6370808d41 | ||
|
|
cbd759aaeb | ||
|
|
3ac08c3de4 | ||
|
|
a6b40b73e5 | ||
|
|
99cfe192da | ||
|
|
2e39ede848 | ||
|
|
398e4cd2dc | ||
|
|
57f370cde9 | ||
|
|
c9c8d2599e | ||
|
|
16b11bda83 | ||
|
|
f07dd02b50 | ||
|
|
e5611565b7 | ||
|
|
9d1bd18596 | ||
|
|
a435a436c1 | ||
|
|
d6cd0deaef | ||
|
|
1db266b20d | ||
|
|
3f9900a864 | ||
|
|
3ca1a387c2 | ||
|
|
f92ccf70fd | ||
|
|
f3d178f600 | ||
|
|
dd2a151543 | ||
|
|
d6664af0ee | ||
|
|
efe0d39c6a | ||
|
|
b4c196f785 | ||
|
|
f1070de038 | ||
|
|
ef72a7cf26 | ||
|
|
a980095efc | ||
|
|
74848aafea | ||
|
|
b24472eae3 | ||
|
|
e53995836a | ||
|
|
e494b0a09f | ||
|
|
da462d9dd4 | ||
|
|
24e4ae95ba | ||
|
|
8392ca602c | ||
|
|
fcb3a64799 | ||
|
|
e1fdb67440 | ||
|
|
ad028bbb80 | ||
|
|
6ca383ecf6 | ||
|
|
7ac9b22886 | ||
|
|
4535b0b41e | ||
|
|
07d802d088 | ||
|
|
49c864fa18 | ||
|
|
d7dbf4aefe | ||
|
|
cc60fed3be | ||
|
|
2928b080f6 | ||
|
|
c460b04c64 | ||
|
|
b3f8324de9 | ||
|
|
70f7c2bb2e | ||
|
|
9ca3b4645e | ||
|
|
d1bcc58beb | ||
|
|
6d30acffcb | ||
|
|
ba622764cb | ||
|
|
ec8247ec59 | ||
|
|
d84a3bcf7a | ||
|
|
a15afc102c | ||
|
|
cc33bde74f | ||
|
|
2aeb8e7dbc | ||
|
|
0f6ef048d2 | ||
|
|
fe941cb54a | ||
|
|
9187d2f3a9 | ||
|
|
e9877ea8b1 | ||
|
|
f9771700e4 | ||
|
|
87802c86d9 | ||
|
|
05eec99269 | ||
|
|
be68f6f8ce | ||
|
|
b32cc01c9f | ||
|
|
afc292e58d | ||
|
|
3e30a5d967 | ||
|
|
9d1b3bab76 | ||
|
|
408c8d0178 | ||
|
|
d89e10d361 | ||
|
|
1742db0c30 | ||
|
|
e1b801be36 | ||
|
|
1da99ce013 | ||
|
|
dd36adc0f4 | ||
|
|
ef4c7b54ef | ||
|
|
068142fce2 | ||
|
|
c289cc891a | ||
|
|
2518e6c95b | ||
|
|
9fbe346860 | ||
|
|
fa1bb873e2 | ||
|
|
b7e1c54947 | ||
|
|
2da1aab50b | ||
|
|
1c81883d42 | ||
|
|
3364e5818b | ||
|
|
f1e1ac2a01 | ||
|
|
db8b13df4c | ||
|
|
5e5b30b74f | ||
|
|
2acf109c4b | ||
|
|
48381f1f78 | ||
|
|
b1de927f1b | ||
|
|
4e5d78579b | ||
|
|
73da193a4b | ||
|
|
ba256b23f2 | ||
|
|
f6fdabd20b | ||
|
|
dbe1d029ec | ||
|
|
082976d8d0 | ||
|
|
fe828185ed | ||
|
|
9e52134d30 | ||
|
|
c2b25c17c5 | ||
|
|
be02572d58 | ||
|
|
393f469eb3 | ||
|
|
6988039975 | ||
|
|
b25933b607 | ||
|
|
e013459b18 | ||
|
|
b062a3f938 | ||
|
|
980c865174 | ||
|
|
b4fe7f3a09 | ||
|
|
9c09861946 | ||
|
|
6e69bfbb28 | ||
|
|
9d42621fa4 | ||
|
|
c28990d871 | ||
|
|
74ac6fb6b9 | ||
|
|
a9108c1809 | ||
|
|
30f7288082 | ||
|
|
3436da65a4 | ||
|
|
b909bc8b58 | ||
|
|
6e57306a13 | ||
|
|
7f6f5c2a6a | ||
|
|
d50de2728f | ||
|
|
4fabd02d25 | ||
|
|
d718f3b6d0 | ||
|
|
ca24dc2d5f | ||
|
|
937a7e93f2 | ||
|
|
ae81b96b60 | ||
|
|
b8d78424ab | ||
|
|
0673245d0c | ||
|
|
14b9418cc5 | ||
|
|
5322bac5fc | ||
|
|
e0605b464b | ||
|
|
00a7403236 | ||
|
|
57b5f42847 | ||
|
|
a2a0715bd4 | ||
|
|
57cc3d1d3d | ||
|
|
7a4ff424fc | ||
|
|
ace442b992 | ||
|
|
53c1f120a8 | ||
|
|
37a89918e0 | ||
|
|
e0f468f6c1 | ||
|
|
5d149e4d50 | ||
|
|
94c7899257 | ||
|
|
ab7ecc9c30 | ||
|
|
2e024823d2 | ||
|
|
456ca3d587 | ||
|
|
0fce8ef178 | ||
|
|
6e07283dd5 | ||
|
|
ffa4ff1a2e | ||
|
|
bb437646fc | ||
|
|
7492060525 | ||
|
|
b3c49e94a0 | ||
|
|
cece8c8bf0 | ||
|
|
834c3378af | ||
|
|
c91cf68754 | ||
|
|
3298bf4f00 | ||
|
|
ae6196507d | ||
|
|
22af93d851 | ||
|
|
cac6e45a67 | ||
|
|
ad7089a6d0 | ||
|
|
8cd5f65a6f | ||
|
|
ab44c24333 | ||
|
|
b7ad4c4c30 | ||
|
|
10adec5f1b | ||
|
|
7414e9d196 | ||
|
|
22601b0b63 | ||
|
|
b0d80c4b3e | ||
|
|
384fa43fc3 | ||
|
|
8a604b93ab | ||
|
|
9eec7c3206 | ||
|
|
b82ddf9cfb | ||
|
|
7e5f5ebf86 | ||
|
|
9f5f747dc3 | ||
|
|
d2e9b621ab | ||
|
|
6efd5fa2b9 | ||
|
|
02c0a1e77e | ||
|
|
dc4ffa8d9b | ||
|
|
3a58c4c3a0 | ||
|
|
03b16ed2b1 | ||
|
|
bccee85c8f | ||
|
|
95b77a5215 | ||
|
|
b697bbb5b5 | ||
|
|
9494623869 | ||
|
|
76ae9da9db | ||
|
|
d4e8e0f5ab | ||
|
|
68a675cc68 | ||
|
|
a9246333fd | ||
|
|
bc0af67aaf | ||
|
|
6a157cf8bb | ||
|
|
00f276d23f | ||
|
|
1300a4bc8c | ||
|
|
286452c7f0 | ||
|
|
be9371ca8f | ||
|
|
df40cd233f | ||
|
|
e9c2b280db | ||
|
|
6a4a950a3c | ||
|
|
d3c2eab0b3 | ||
|
|
af96de6552 | ||
|
|
50556f3b35 | ||
|
|
b2b9ded12f | ||
|
|
10bff4ecc4 | ||
|
|
5c1fa3e70e | ||
|
|
5ccebce777 | ||
|
|
3b7c4c51d5 | ||
|
|
1e0af59f69 | ||
|
|
e64bafed3a | ||
|
|
112695e4da | ||
|
|
ba90e3c990 | ||
|
|
92f05a67a4 | ||
|
|
ca7a44d024 | ||
|
|
3e3ed8c5c9 | ||
|
|
46782ad79b | ||
|
|
2286204354 | ||
|
|
395a2a3724 | ||
|
|
38057f0d2e | ||
|
|
1ab9dc8293 | ||
|
|
6aa7b04f79 | ||
|
|
ddd518a161 | ||
|
|
e2f36ee608 | ||
|
|
ea6a5b03e0 | ||
|
|
b3bccabc66 | ||
|
|
495128ba95 | ||
|
|
c7ca350cd3 | ||
|
|
c0c2fd0782 | ||
|
|
b7159c15cc | ||
|
|
9bf5b0defa | ||
|
|
bd8d418a95 | ||
|
|
3a75d59c3d | ||
|
|
5be465bd86 | ||
|
|
4fc7939848 | ||
|
|
2b3b4e0f60 | ||
|
|
e0dea577ee | ||
|
|
a8cb9ee013 | ||
|
|
ebfffaa38f | ||
|
|
ec850e607f | ||
|
|
370becdfc2 | ||
|
|
2c97fbabbd | ||
|
|
a2bbe3dda4 | ||
|
|
2eea5d4cb4 | ||
|
|
7a48d9ee82 | ||
|
|
e30fdffd1e | ||
|
|
2eec687474 | ||
|
|
680d6bbbf8 | ||
|
|
e194dc5306 | ||
|
|
8cfb52ddbb | ||
|
|
5d5298087f | ||
|
|
61e4a1adf9 | ||
|
|
42a28ac1ba | ||
|
|
eef62bf4e9 | ||
|
|
b7ba7e8a7b | ||
|
|
6f36f0f930 | ||
|
|
444ca3f669 | ||
|
|
cdd1d78bf2 | ||
|
|
427551eabf | ||
|
|
7bb437146d | ||
|
|
83eea230f3 | ||
|
|
0475d015fe | ||
|
|
364f8e7b5d | ||
|
|
af18413d97 | ||
|
|
6640293087 | ||
|
|
ad324a39ae | ||
|
|
24b2af5218 | ||
|
|
9ca11c06b7 | ||
|
|
23cdebddc4 | ||
|
|
ccd916babe | ||
|
|
03b5891cf7 | ||
|
|
eaee492dbc | ||
|
|
d2243757a3 | ||
|
|
2f47e5c766 | ||
|
|
d558bcfad8 | ||
|
|
87e502c6bc | ||
|
|
94c82a189d | ||
|
|
b01cf0dd54 | ||
|
|
a9b97aa6f4 | ||
|
|
50d9c7d5a4 | ||
|
|
e67b26eee9 | ||
|
|
6aafb46807 | ||
|
|
bc9b8c8239 | ||
|
|
0013256e81 | ||
|
|
1913320cbe | ||
|
|
ae76e473e1 | ||
|
|
8e1a7a8646 | ||
|
|
e82687ddf4 | ||
|
|
7d2b946d0b | ||
|
|
c7db9febb0 | ||
|
|
7ad13cdbdb | ||
|
|
c5a46e7435 | ||
|
|
17c4ec4812 | ||
|
|
4a649e3b14 | ||
|
|
8a44c879c6 | ||
|
|
e0e3ef1c57 | ||
|
|
4555ad5d1f | ||
|
|
6ac120f299 | ||
|
|
e41f0b341c | ||
|
|
b3b155d488 | ||
|
|
e74733ab9e | ||
|
|
11ab0be11a | ||
|
|
1281fdf0f2 | ||
|
|
34ebb29726 | ||
|
|
f9edf76e7c | ||
|
|
970b2f9d38 | ||
|
|
292accde2b | ||
|
|
ee3d0513ad | ||
|
|
8fdf88b8e3 | ||
|
|
0c52275bdb | ||
|
|
cde1e8739a | ||
|
|
a9b3b2e327 | ||
|
|
6ac5d80286 | ||
|
|
ec1a2adf9c | ||
|
|
5b6bbf4ab2 | ||
|
|
2f0088039d | ||
|
|
b023f0c0f2 | ||
|
|
2c91f0d750 | ||
|
|
5922742d56 | ||
|
|
681ba6d520 | ||
|
|
7a5e36f3f5 | ||
|
|
289e9aeb9d | ||
|
|
d1561b74eb | ||
|
|
bb7ac9edb5 | ||
|
|
010d0bfeea | ||
|
|
e05997c25e | ||
|
|
c5bce4a465 | ||
|
|
2c9619bc1d | ||
|
|
18f5c985d9 | ||
|
|
a197acfcd3 | ||
|
|
18af149e91 | ||
|
|
614cff89bc | ||
|
|
a7227ee01b | ||
|
|
232faba796 | ||
|
|
d7d629911b | ||
|
|
6e90406e0f | ||
|
|
c868a3eef3 | ||
|
|
20e9ce8a62 | ||
|
|
704d56e241 | ||
|
|
b934677a81 | ||
|
|
2d038b57b2 | ||
|
|
0b740c9baa | ||
|
|
ac3e6e3944 | ||
|
|
d2270a2261 | ||
|
|
1250cd4630 | ||
|
|
f8cf09a230 | ||
|
|
e4224a396b | ||
|
|
21bd16bb59 | ||
|
|
9218684759 | ||
|
|
d5819a7ca7 | ||
|
|
0ca37e613c | ||
|
|
ca1afa7213 | ||
|
|
5f356b9993 | ||
|
|
d6f5d0c6b1 | ||
|
|
62ec10a7f5 | ||
|
|
736a1819aa | ||
|
|
f3e7ac0a2c | ||
|
|
3678cba0be | ||
|
|
7af186fddf | ||
|
|
7cc200766e | ||
|
|
db7ef635c0 | ||
|
|
0eb1bc1a02 | ||
|
|
63fcf41bea | ||
|
|
2791a753bf | ||
|
|
a09a0e3511 | ||
|
|
0ce8745928 | ||
|
|
d8ae925425 | ||
|
|
fe8bbc2da7 | ||
|
|
77c286cf02 | ||
|
|
3ec6400d70 | ||
|
|
a6ebffb695 | ||
|
|
767fa91eae | ||
|
|
5f74db4500 | ||
|
|
511c12dd39 | ||
|
|
893d20f735 | ||
|
|
35cfd25db3 | ||
|
|
658f8bdee7 | ||
|
|
5518f24ec3 | ||
|
|
b93638ef1e | ||
|
|
a1549901ce | ||
|
|
78aa59c68b | ||
|
|
ec0dd6e34a | ||
|
|
3294774148 | ||
|
|
cef79ca579 | ||
|
|
106364a45c | ||
|
|
9355e3f5f5 | ||
|
|
f15763518a | ||
|
|
137da7e4b6 | ||
|
|
9f4b720a63 | ||
|
|
76fcd96dae | ||
|
|
11fec7d4d1 | ||
|
|
0b4a51930c | ||
|
|
c66755b661 | ||
|
|
4d8cda1c3b | ||
|
|
3af36943e8 | ||
|
|
8ef7274ee6 | ||
|
|
d9fcc45d05 | ||
|
|
ce7c11625f | ||
|
|
5a207cce8f | ||
|
|
b3ae6bcd3f | ||
|
|
5468528748 | ||
|
|
69f4ffb851 | ||
|
|
2be4fbb835 | ||
|
|
062c3c00a2 | ||
|
|
92b87c2fec | ||
|
|
3954bcf396 | ||
|
|
b7999a9bc1 | ||
|
|
a0d847f636 | ||
|
|
217b5cc72d | ||
|
|
4092fd21dc | ||
|
|
2a4b32dee2 | ||
|
|
daf3e99b96 | ||
|
|
b177a29d3f | ||
|
|
65111eb2b3 | ||
|
|
0cfaa76e45 | ||
|
|
2ae2d6cd1d | ||
|
|
204a73c1d9 | ||
|
|
08e2352f7b | ||
|
|
f907b62526 | ||
|
|
233b52735e | ||
|
|
38dabdbb3a | ||
|
|
84a46753ab | ||
|
|
d5b1608216 | ||
|
|
25487fa5ee | ||
|
|
2dcda8a8ac | ||
|
|
98dd6d068a | ||
|
|
5124c1e0d9 | ||
|
|
a47c8618ec | ||
|
|
87ad4fc4b2 | ||
|
|
92a5f00ffb | ||
|
|
aea090045b | ||
|
|
a4c9053d40 | ||
|
|
2f2d27fd82 | ||
|
|
019eb13681 | ||
|
|
450eb91fe2 | ||
|
|
0551bc90a5 | ||
|
|
a0ea6f6b6b | ||
|
|
1a9ac3b1f9 | ||
|
|
625717daa8 | ||
|
|
74f8e603d9 | ||
|
|
d0d89d39ef | ||
|
|
b64c39dfe7 | ||
|
|
3fb0e4872a | ||
|
|
8d9e9e013c | ||
|
|
26ec845921 | ||
|
|
c112d7334d | ||
|
|
6c11f94013 | ||
|
|
6e25e65085 | ||
|
|
8f98592ac9 | ||
|
|
b9040669a0 | ||
|
|
647210a4b9 | ||
|
|
8fea0529c1 | ||
|
|
6a3ceaa377 | ||
|
|
3e45b83065 | ||
|
|
33ea606f45 | ||
|
|
5af2c51e78 | ||
|
|
d3bdb8ea6d | ||
|
|
1c51d3db0f | ||
|
|
475007d63a | ||
|
|
92f218207b | ||
|
|
ad09367a92 | ||
|
|
9921f8cc3a | ||
|
|
4e71a1702b | ||
|
|
b201cfaa0f | ||
|
|
ae3611730a | ||
|
|
934319fc28 | ||
|
|
44ad9628c9 | ||
|
|
1f4abb265a | ||
|
|
ae2cf1f598 | ||
|
|
b81f98b8a6 | ||
|
|
b72401b47b | ||
|
|
ce6dbe41a9 | ||
|
|
95c6ed0568 | ||
|
|
bc875a9df1 | ||
|
|
a97e4252e3 | ||
|
|
9a7488a5ce | ||
|
|
20ec1173f4 | ||
|
|
949729ff5c | ||
|
|
c5a7a85a4e | ||
|
|
3c6fa9126a | ||
|
|
d784401215 | ||
|
|
71a7c16ee0 | ||
|
|
d1f65d8dc1 | ||
|
|
8b3df18bcc | ||
|
|
6655f43282 | ||
|
|
28d6277396 | ||
|
|
db45970a66 | ||
|
|
4c572ffe95 | ||
|
|
001b147450 | ||
|
|
8441cff1d7 | ||
|
|
6258f72a00 | ||
|
|
14a611775c | ||
|
|
80b3fdf2f7 | ||
|
|
6632188606 | ||
|
|
6afb463e9b | ||
|
|
47c2ec2d0b | ||
|
|
342b671d05 | ||
|
|
983a213bdc | ||
|
|
22603d19e0 | ||
|
|
373ad49157 | ||
|
|
bc66b3fb8d | ||
|
|
3bae595182 | ||
|
|
8d07ba0d51 | ||
|
|
b61f50665e | ||
|
|
0ad76c3380 | ||
|
|
bd9e0f3934 | ||
|
|
359fb8fa3a | ||
|
|
4c8aad0d1b | ||
|
|
d765d77e9b | ||
|
|
af41cdfc8b | ||
|
|
226a7521ed | ||
|
|
5ffa924488 | ||
|
|
6b47aaab82 | ||
|
|
f39340ff6b | ||
|
|
ea09c0846f | ||
|
|
46b7181f13 | ||
|
|
f0ea77b230 | ||
|
|
562fdfc8f9 | ||
|
|
5ce74b5958 | ||
|
|
470b2822a3 | ||
|
|
8bcaca435a | ||
|
|
f72bb966f8 | ||
|
|
1671c2afb2 | ||
|
|
ce8b7a2a69 | ||
|
|
46e181aa8b | ||
|
|
0a44bfdca3 | ||
|
|
8121e04200 | ||
|
|
e31705b5ab | ||
|
|
199cc700a3 | ||
|
|
eab4b4ccd7 | ||
|
|
1111f18eb4 | ||
|
|
8181f9e362 | ||
|
|
f93d256190 | ||
|
|
80e133f16d | ||
|
|
1f11f80641 | ||
|
|
1d861dc37a | ||
|
|
c1807d8408 | ||
|
|
e09afb4b44 | ||
|
|
0d3a9d481f | ||
|
|
4379bd4cbb | ||
|
|
2649b638dd | ||
|
|
64b4165c8d | ||
|
|
9d658aaa5a | ||
|
|
a61b7f7e7c | ||
|
|
c4b502a470 | ||
|
|
ee57054d05 | ||
|
|
26ff18575c | ||
|
|
760632b292 | ||
|
|
8259f9b7fa | ||
|
|
0b3e0dd1d2 | ||
|
|
72f99ff953 | ||
|
|
cf5803e44c | ||
|
|
cce731c3c2 | ||
|
|
2da8c48be1 | ||
|
|
1837caa70d | ||
|
|
a3598193a0 | ||
|
|
ccb6238de1 | ||
|
|
d6fb25c439 | ||
|
|
416c8b1da3 | ||
|
|
100d6655df | ||
|
|
c09f8e4ddc | ||
|
|
3e16468423 | ||
|
|
642ae83d86 | ||
|
|
f6615cac41 | ||
|
|
44b48d9518 | ||
|
|
e455ba4ed5 | ||
|
|
8b7721ebbb | ||
|
|
f77f27163d | ||
|
|
14099f1b93 | ||
|
|
6df90ad9fd | ||
|
|
99a1e3f3a3 | ||
|
|
1366d070fc | ||
|
|
ad7f4c0317 | ||
|
|
b6927970f1 | ||
|
|
9a5c9df809 | ||
|
|
b705f260f4 | ||
|
|
5f4552391f | ||
|
|
e2742953a6 | ||
|
|
1daa7068b2 | ||
|
|
881dfe8179 | ||
|
|
f079cdf479 | ||
|
|
c6e5d90eff | ||
|
|
c49c6ac97a | ||
|
|
5292e855c0 | ||
|
|
179ddbe88b | ||
|
|
465a970724 | ||
|
|
6e974b5f04 | ||
|
|
f75f0dbad6 | ||
|
|
0a8d6bc402 | ||
|
|
58e95cd11e | ||
|
|
641303a361 | ||
|
|
aa3c7b3271 | ||
|
|
a669abf16b | ||
|
|
d481d887bc | ||
|
|
aec642febb | ||
|
|
1cb6498fdb | ||
|
|
a0281f5acb | ||
|
|
7047a2c1af | ||
|
|
56ad56c812 | ||
|
|
2ef5579eae | ||
|
|
f01dfe858d | ||
|
|
7652d2abb0 | ||
|
|
3223a97dc6 | ||
|
|
b3988621c5 | ||
|
|
ca88b25da6 | ||
|
|
5525602df0 | ||
|
|
d3cd21ccf8 | ||
|
|
3be9ba14f3 | ||
|
|
88ed8e1cd6 | ||
|
|
9c0cb90997 | ||
|
|
c7e2151a4b | ||
|
|
15b17f9334 | ||
|
|
9e57be4b5c | ||
|
|
09e246f306 | ||
|
|
5cdd9ab7e1 | ||
|
|
5cfa72a130 | ||
|
|
40b086d6e8 | ||
|
|
f0ea093de8 | ||
|
|
eff31a3361 | ||
|
|
95c9aa1ccb | ||
|
|
2ad29f410d | ||
|
|
a775aa6389 | ||
|
|
e6c4571191 | ||
|
|
e76e68b211 | ||
|
|
66113c2a62 | ||
|
|
b7fcb35a39 | ||
|
|
dcee8936c1 | ||
|
|
44abe925df | ||
|
|
f10be072ff | ||
|
|
f0730c6489 | ||
|
|
d8eed6018f | ||
|
|
c173bf1c62 | ||
|
|
52714cedd4 | ||
|
|
94cf391ef1 | ||
|
|
2b2176a3c1 | ||
|
|
b00c77dc62 | ||
|
|
fd866d1801 | ||
|
|
aa14e223ee | ||
|
|
11c26ebb55 | ||
|
|
2d5588c5f0 | ||
|
|
47e4ee4370 | ||
|
|
cf19a2a59f | ||
|
|
fff21a0b35 | ||
|
|
faa26650c9 | ||
|
|
c81fb88035 | ||
|
|
9c4b43b494 | ||
|
|
33929489b9 | ||
|
|
c111134a55 | ||
|
|
de4ef24f75 | ||
|
|
b1b7f3541c | ||
|
|
925dd3e59e | ||
|
|
9242998db1 | ||
|
|
de6e6c764e | ||
|
|
68f0d45485 | ||
|
|
0b542a9706 | ||
|
|
754b5133e9 | ||
|
|
5002f3ae35 | ||
|
|
7a75bb2121 | ||
|
|
753f4cfc26 | ||
|
|
5c87dbf5a8 | ||
|
|
d7f807b71f | ||
|
|
d4fd589638 | ||
|
|
d56313acba | ||
|
|
b950022894 | ||
|
|
87bba2e8d3 | ||
|
|
de6a401a22 | ||
|
|
69de33e024 | ||
|
|
e173e032bc | ||
|
|
c28cc0f1ac | ||
|
|
5e47c648ed | ||
|
|
5b2b436fab |
.devcontainer/Dockerfile (42 lines, deleted)

@@ -1,42 +0,0 @@
# This is a Dockerfile for Developer Container

# Use the Python base image
ARG VARIANT="3.11-bullseye"
FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} AS langchain-dev-base

USER vscode

# Define the version of Poetry to install (default is 1.4.2)
# Define the directory of python virtual environment
ARG PYTHON_VIRTUALENV_HOME=/home/vscode/langchain-py-env \
    POETRY_VERSION=1.4.2

ENV POETRY_VIRTUALENVS_IN_PROJECT=false \
    POETRY_NO_INTERACTION=true

# Create a Python virtual environment for Poetry and install it
RUN python3 -m venv ${PYTHON_VIRTUALENV_HOME} && \
    $PYTHON_VIRTUALENV_HOME/bin/pip install --upgrade pip && \
    $PYTHON_VIRTUALENV_HOME/bin/pip install poetry==${POETRY_VERSION}

ENV PATH="$PYTHON_VIRTUALENV_HOME/bin:$PATH" \
    VIRTUAL_ENV=$PYTHON_VIRTUALENV_HOME

# Setup for bash
RUN poetry completions bash >> /home/vscode/.bash_completion && \
    echo "export PATH=$PYTHON_VIRTUALENV_HOME/bin:$PATH" >> ~/.bashrc

# Set the working directory for the app
WORKDIR /workspaces/langchain

# Use a multi-stage build to install dependencies
FROM langchain-dev-base AS langchain-dev-dependencies

ARG PYTHON_VIRTUALENV_HOME

# Copy only the dependency files for installation
COPY pyproject.toml poetry.lock poetry.toml ./

# Install the Poetry dependencies (this layer will be cached as long as the dependencies don't change)
RUN poetry install --no-interaction --no-ansi --with dev,test,docs
.devcontainer/README.md (44 lines, new file)

@@ -0,0 +1,44 @@
# Dev container

This project includes a [dev container](https://containers.dev/), which lets you use a container as a full-featured dev environment.

You can use the dev container configuration in this folder to build and run the app without needing to install any of its tools locally! You can use it in [GitHub Codespaces](https://github.com/features/codespaces) or the [VS Code Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers).

## GitHub Codespaces
[](https://codespaces.new/langchain-ai/langchain)

You may use the button above, or follow these steps to open this repo in a Codespace:
1. Click the **Code** drop-down menu at the top of https://github.com/langchain-ai/langchain.
1. Click on the **Codespaces** tab.
1. Click **Create codespace on master**.

For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).

## VS Code Dev Containers
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)

Note: If you click the link above you will open the main repo (langchain-ai/langchain) and not your local cloned repo. This is fine if you only want to run and test the library, but if you want to contribute you can use the link below, replacing `<yourusername>` and `<yourclonedreponame>` with your username and cloned repo name:
```
https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/<yourusername>/<yourclonedreponame>
```
Then you will have a local cloned repo where you can contribute and then create pull requests.

If you already have VS Code and Docker installed, you can use the button above to get started. This will cause VS Code to automatically install the Dev Containers extension if needed, clone the source code into a container volume, and spin up a dev container for use.

Alternatively, you can follow these steps to open this repo in a container using the VS Code Dev Containers extension:

1. If this is your first time using a development container, please ensure your system meets the pre-reqs (i.e. have Docker installed) in the [getting started steps](https://aka.ms/vscode-remote/containers/getting-started).

2. Open a locally cloned copy of the code:

   - Fork and clone this repository to your local filesystem.
   - Press <kbd>F1</kbd> and select the **Dev Containers: Open Folder in Container...** command.
   - Select the cloned copy of this folder, wait for the container to start, and try things out!

You can learn more in the [Dev Containers documentation](https://code.visualstudio.com/docs/devcontainers/containers).

## Tips and tricks

* If you are working with the same repository folder in a container and Windows, you'll want consistent line endings (otherwise you may see hundreds of changes in the SCM view). The `.gitattributes` file in the root of this repo will disable line ending conversion and should prevent this. See [tips and tricks](https://code.visualstudio.com/docs/devcontainers/tips-and-tricks#_resolving-git-line-ending-issues-in-containers-resulting-in-many-modified-files) for more info.
* If you'd like to review the contents of the image used in this dev container, you can check it out in the [devcontainers/images](https://github.com/devcontainers/images/tree/main/src/python) repo.
.devcontainer/devcontainer.json

@@ -1,24 +1,26 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
{
    "dockerComposeFile": "./docker-compose.yaml",
    "service": "langchain",
    "workspaceFolder": "/workspaces/langchain",
    // Name for the dev container
    "name": "langchain",
    "customizations": {
        "vscode": {
            "extensions": [
                "ms-python.python"
            ],
            "settings": {
                "python.defaultInterpreterPath": "/home/vscode/langchain-py-env/bin/python3.11"
            }
        }

    },

    // Features to add to the dev container. More info: https://containers.dev/features.
    "features": {},
    // Point to a Docker Compose file
    "dockerComposeFile": "./docker-compose.yaml",

    // Required when using Docker Compose. The name of the service to connect to once running
    "service": "langchain",

    // The optional 'workspaceFolder' property is the path VS Code should open by default when
    // connected. This is typically a file mount in .devcontainer/docker-compose.yml
    "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

    // Prevent the container from shutting down
    "overrideCommand": true

    // Features to add to the dev container. More info: https://containers.dev/features
    // "features": {
    //     "ghcr.io/devcontainers-contrib/features/poetry:2": {}
    // }

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    // "forwardPorts": [],
@@ -26,8 +28,9 @@
    // Uncomment the next line to run commands after the container is created.
    // "postCreateCommand": "cat /etc/os-release",

    // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
    // "remoteUser": "devcontainer"
    "remoteUser": "vscode",
    "overrideCommand": true
    // Configure tool-specific properties.
    // "customizations": {},

    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
    // "remoteUser": "root"
}
.devcontainer/docker-compose.yaml

@@ -2,10 +2,11 @@ version: '3'
services:
  langchain:
    build:
      dockerfile: .devcontainer/Dockerfile
      context: ../
      dockerfile: libs/langchain/dev.Dockerfile
      context: ..
    volumes:
      - ../:/workspaces/langchain
      # Update this to wherever you want VS Code to mount the folder of your project
      - ..:/workspaces:cached
    networks:
      - langchain-network
    # environment:
.gitattributes (3 lines, new file)

@@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf
.github/CODE_OF_CONDUCT.md (132 lines, new file)

@@ -0,0 +1,132 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
  community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of
  any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
  without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
conduct@langchain.dev.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
.github/CONTRIBUTING.md (255 lines changed)

@@ -1,50 +1,47 @@
# Contributing to LangChain

Hi there! Thank you for even being interested in contributing to LangChain.
As an open source project in a rapidly developing field, we are extremely open
to contributions, whether they be in the form of new features, improved infra, better documentation, or bug fixes.
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether they involve new features, improved infrastructure, better documentation, or bug fixes.

## 🗺️ Guidelines

### 👩‍💻 Contributing Code

To contribute to this project, please follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
Please do not try to push directly to this repo unless you are maintainer.
To contribute to this project, please follow the ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
Please do not try to push directly to this repo unless you are a maintainer.

Please follow the checked-in pull request template when opening pull requests. Note related issues and tag relevant
maintainers.

Pull requests cannot land without passing the formatting, linting and testing checks first. See
[Common Tasks](#-common-tasks) for how to run these checks locally.
Pull requests cannot land without passing the formatting, linting, and testing checks first. See [Testing](#testing) and
[Formatting and Linting](#formatting-and-linting) for how to run these checks locally.

It's essential that we maintain great documentation and testing. If you:
- Fix a bug
  - Add a relevant unit or integration test when possible. These live in `tests/unit_tests` and `tests/integration_tests`.
- Make an improvement
  - Update any affected example notebooks and documentation. These lives in `docs`.
  - Update any affected example notebooks and documentation. These live in `docs`.
  - Update unit and integration tests when relevant.
- Add a feature
  - Add a demo notebook in `docs/modules`.
  - Add unit and integration tests.

We're a small, building-oriented team. If there's something you'd like to add or change, opening a pull request is the
We are a small, progress-oriented team. If there's something you'd like to add or change, opening a pull request is the
best way to get our attention.

### 🚩GitHub Issues

Our [issues](https://github.com/hwchase17/langchain/issues) page is kept up to date
with bugs, improvements, and feature requests.
Our [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests.

There is a taxonomy of labels to help with sorting and discovery of issues of interest. Please use these to help
organize issues.
There is a taxonomy of labels to help with sorting and discovery of issues of interest. Please use these to help organize issues.

If you start working on an issue, please assign it to yourself.

If you are adding an issue, please try to keep it focused on a single, modular bug/improvement/feature.
If two issues are related, or blocking, please link them rather than combining them.

We will try to keep these issues as up to date as possible, though
with the rapid rate of develop in this field some may get out of date.
We will try to keep these issues as up-to-date as possible, though
with the rapid rate of development in this field some may get out of date.
If you notice this happening, please let us know.

### 🙋Getting Help
@@ -59,65 +56,63 @@ we do not want these to get in the way of getting good code into the codebase.

## 🚀 Quick Start

This project uses [Poetry](https://python-poetry.org/) as a dependency manager. Check out Poetry's [documentation on how to install it](https://python-poetry.org/docs/#installation) on your system before proceeding.
This quick start guide explains how to run the repository locally.
For a [development container](https://containers.dev/), see the [.devcontainer folder](https://github.com/langchain-ai/langchain/tree/master/.devcontainer).

❗Note: If you use `Conda` or `Pyenv` as your environment / package manager, avoid dependency conflicts by doing the following first:
1. *Before installing Poetry*, create and activate a new Conda env (e.g. `conda create -n langchain python=3.9`)
2. Install Poetry (see above)
3. Tell Poetry to use the virtualenv python environment (`poetry config virtualenvs.prefer-active-python true`)
4. Continue with the following steps.
### Dependency Management: Poetry and other env/dependency managers

To install requirements:
This project utilizes [Poetry](https://python-poetry.org/) v1.6.1+ as a dependency manager.

❗Note: *Before installing Poetry*, if you use `Conda`, create and activate a new Conda env (e.g. `conda create -n langchain python=3.9`)

Install Poetry: **[documentation on how to install it](https://python-poetry.org/docs/#installation)**.

❗Note: If you use `Conda` or `Pyenv` as your environment/package manager, after installing Poetry,
tell Poetry to use the virtualenv python environment (`poetry config virtualenvs.prefer-active-python true`)

### Core vs. Experimental

This repository contains two separate projects:
- `langchain`: core langchain code, abstractions, and use cases.
- `langchain.experimental`: see the [Experimental README](https://github.com/langchain-ai/langchain/tree/master/libs/experimental/README.md) for more information.

Each of these has its own development environment. Docs are run from the top-level makefile, but development
is split across separate test & release flows.

For this quickstart, start with langchain core:

```bash
poetry install -E all
cd libs/langchain
```

This will install all requirements for running the package, examples, linting, formatting, tests, and coverage. Note the `-E all` flag will install all optional dependencies necessary for integration testing.
### Local Development Dependencies

❗Note: If you're running Poetry 1.4.1 and receive a `WheelFileValidationError` for `debugpy` during installation, you can try either downgrading to Poetry 1.4.0 or disabling "modern installation" (`poetry config installer.modern-installation false`) and re-install requirements. See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.

Now, you should be able to run the common tasks in the following section. To double check, run `make test`; all tests should pass. If they don't, you may need to pip install additional dependencies, such as `numexpr` and `openapi_schema_pydantic`.

## ✅ Common Tasks

Type `make` for a list of common tasks.

### Code Formatting

Formatting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/).

To run formatting for this project:
Install langchain development requirements (for running langchain, running examples, linting, formatting, tests, and coverage):

```bash
make format
poetry install --with test
```

### Linting

Linting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), [flake8](https://flake8.pycqa.org/en/latest/), and [mypy](http://mypy-lang.org/).

To run linting for this project:
Then verify dependency installation:

```bash
make lint
make test
```

We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
If the tests don't pass, you may need to pip install additional dependencies, such as `numexpr` and `openapi_schema_pydantic`.

### Coverage

Code coverage (i.e. the amount of code that is covered by unit tests) helps identify areas of the code that are potentially more or less brittle.

To get a report of current coverage, run the following:

```bash
make coverage
```
If during installation you receive a `WheelFileValidationError` for `debugpy`, please make sure you are running
Poetry v1.6.1+. This bug was present in older versions of Poetry (e.g. 1.4.1) and has been resolved in newer releases.
If you are still seeing this bug on v1.6.1, you may also try disabling "modern installation"
(`poetry config installer.modern-installation false`) and re-installing requirements.
See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.

### Testing

_Some test dependencies are optional; see the section about optional dependencies._

Unit tests cover modular logic that does not require calls to outside APIs.
If you add new logic, please add a unit test.

To run unit tests:

@@ -131,21 +126,118 @@ To run unit tests in Docker:
make docker_tests
```

If you add new logic, please add a unit test.
There are also [integration tests and code-coverage](https://github.com/langchain-ai/langchain/tree/master/libs/langchain/tests/README.md) available.

Integration tests cover logic that requires making calls to outside APIs (often integration with other services).
### Formatting and Linting

To run integration tests:
Run these locally before submitting a PR; the CI system will also check.

#### Code Formatting

Formatting for this project is done via [ruff](https://docs.astral.sh/ruff/rules/).

To run formatting for docs, cookbook and templates:

```bash
make integration_tests
make format
```

If you add support for a new external API, please add a new integration test.
To run formatting for a library, run the same command from the relevant library directory:

### Adding a Jupyter Notebook
```bash
cd libs/{LIBRARY}
make format
```

If you are adding a Jupyter notebook example, you'll want to install the optional `dev` dependencies.
Additionally, you can run the formatter only on the files that have been modified in your current branch as compared to the master branch using the `format_diff` command:

```bash
make format_diff
```

This is especially useful when you have made changes to a subset of the project and want to ensure your changes are properly formatted without affecting the rest of the codebase.

#### Linting

Linting for this project is done via a combination of [ruff](https://docs.astral.sh/ruff/rules/) and [mypy](http://mypy-lang.org/).

To run linting for docs, cookbook and templates:

```bash
make lint
```

To run linting for a library, run the same command from the relevant library directory:

```bash
cd libs/{LIBRARY}
make lint
```

In addition, you can run the linter only on the files that have been modified in your current branch as compared to the master branch using the `lint_diff` command:

```bash
make lint_diff
```

This can be very helpful when you've made changes to only certain parts of the project and want to ensure your changes meet the linting standards without having to check the entire codebase.

We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.

#### Spellcheck

Spellchecking for this project is done via [codespell](https://github.com/codespell-project/codespell).
Note that `codespell` finds common typos, so it can produce both false positives (correctly spelled but rarely used words) and false negatives (misspellings it does not catch).

To check spelling for this project:

```bash
make spell_check
```

To fix spelling in place:

```bash
make spell_fix
```

If codespell is incorrectly flagging a word, you can skip spellcheck for that word by adding it to the codespell config in the `pyproject.toml` file.

```toml
[tool.codespell]
...
# Add here:
ignore-words-list = 'momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure'
```

## Working with Optional Dependencies

Langchain relies heavily on optional dependencies to keep the Langchain package lightweight.

If you're adding a new dependency to Langchain, assume that it will be an optional dependency, and
that most users won't have it installed.

Users who do not have the dependency installed should be able to **import** your code without
any side effects (no warnings, no errors, no exceptions).
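For illustration, a minimal sketch of what this implies in practice: perform the optional import lazily, inside the function that needs it, so that importing the module itself stays side-effect free, and raise a helpful error only when the code path is actually used. The function name `summarize` and the package name `somepackage` below are hypothetical placeholders, not real LangChain identifiers:

```python
def summarize(text: str) -> str:
    """Summarize text with the hypothetical optional dependency `somepackage`."""
    try:
        # Import lazily so that importing this module has no side effects
        # for users who have not installed the optional dependency.
        import somepackage
    except ImportError:
        raise ImportError(
            "Could not import somepackage. "
            "Please install it with `pip install somepackage`."
        )
    return somepackage.summarize(text)
```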

To introduce the dependency to the pyproject.toml file correctly, please do the following:

1. Add the dependency to the main group as an optional dependency
   ```bash
   poetry add --optional [package_name]
   ```
2. Open pyproject.toml and add the dependency to the `extended_testing` extra
3. Relock the poetry file to update the extra.
   ```bash
   poetry lock --no-update
   ```
4. Add a unit test that at the very least attempts to import the new code. Ideally, the unit
   test makes use of lightweight fixtures to test the logic of the code.
5. Please use the `@pytest.mark.requires(package_name)` decorator for any tests that require the dependency (see the sketch after this list).
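As an illustration of steps 4 and 5, a unit test along these lines would attempt the import and run only when the optional package is installed. The module name `my_module` and the package name `somepackage` are hypothetical placeholders:

```python
import pytest


@pytest.mark.requires("somepackage")  # somepackage is a hypothetical optional dependency
def test_my_module_import() -> None:
    """At the very least, attempt to import the new code."""
    # `my_module` is a hypothetical module guarded by the optional dependency.
    from langchain import my_module

    assert my_module is not None
```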

## Adding a Jupyter Notebook

If you are adding a Jupyter Notebook example, you'll want to install the optional `dev` dependencies.

To install dev dependencies:

@@ -163,34 +255,58 @@ When you run `poetry install`, the `langchain` package is installed as editable

## Documentation

While the code is split between `langchain` and `langchain.experimental`, the documentation is one holistic thing.
This covers how to get started contributing to documentation.

From the top-level of this repo, install documentation dependencies:

```bash
poetry install
```

### Contribute Documentation

Docs are largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code.
The docs directory contains Documentation and API Reference.

Documentation is built using [Docusaurus 2](https://docusaurus.io/).

The API Reference is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code.
For that reason, we ask that you add good documentation to all classes and methods.

Similar to linting, we recognize documentation can be annoying. If you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.

### Build Documentation Locally

In the following commands, the prefix `api_` indicates that those are operations for the API Reference.

Before building the documentation, it is always a good idea to clean the build directory:

```bash
make docs_clean
make api_docs_clean
```

Next, you can run the linkchecker to make sure all links are valid:

```bash
make docs_linkcheck
```

Finally, you can build the documentation as outlined below:
Next, you can build the documentation as outlined below:

```bash
make docs_build
make api_docs_build
```

Finally, run the link checker to ensure all links are valid:

```bash
make docs_linkcheck
make api_docs_linkcheck
```

### Verify Documentation changes

After pushing documentation changes to the repository, you can preview and verify that the changes are
what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page.
This will take you to a preview of the documentation changes.
This preview is created by [Vercel](https://vercel.com/docs/getting-started-with-vercel).

## 🏭 Release Process

As of now, LangChain has an ad hoc release process: releases are cut with high frequency by
@@ -202,5 +318,4 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
### 🌟 Recognition

If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
If you have a Twitter account you would like us to mention, please let us know in the PR or in another manner.

If you have a Twitter account you would like us to mention, please let us know in the PR or through another means.
.github/ISSUE_TEMPLATE/bug-report.yml (4 lines changed)

@@ -1,5 +1,5 @@
name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve LangChain
description: Submit a bug report to help us improve LangChain. To report a security issue, please instead use the security option below.
labels: ["02 Bug Report"]
body:
  - type: markdown
@@ -46,7 +46,7 @@ body:
        - @agola11

        Tools / Toolkits
        - @vowelparrot
        - ...

      placeholder: "@Username ..."
.github/ISSUE_TEMPLATE/feature-request.yml (2 lines changed)

@@ -27,4 +27,4 @@ body:
    attributes:
      label: Your contribution
      description: |
        Is there any way that you could help, e.g. by submitting a PR? Make sure to read the CONTRIBUTING.MD [readme](https://github.com/hwchase17/langchain/blob/master/.github/CONTRIBUTING.md)
        Is there any way that you could help, e.g. by submitting a PR? Make sure to read the CONTRIBUTING.MD [readme](https://github.com/langchain-ai/langchain/blob/master/.github/CONTRIBUTING.md)
.github/PULL_REQUEST_TEMPLATE.md (54 lines changed)

@@ -1,46 +1,20 @@
# Your PR Title (What it does)
<!-- Thank you for contributing to LangChain!

<!--
Thank you for contributing to LangChain! Your PR will appear in our next release under the title you set. Please make sure it highlights your valuable contribution.
Replace this entire comment with:
- **Description:** a description of the change,
- **Issue:** the issue # it fixes (if applicable),
- **Dependencies:** any dependencies required for this change,
- **Tag maintainer:** for a quicker response, tag the relevant maintainer (see below),
- **Twitter handle:** we announce bigger features on Twitter. If your PR gets announced, and you'd like a mention, we'll gladly shout you out!

Replace this with a description of the change, the issue it fixes (if applicable), and relevant context. List any dependencies required for this change.
Please make sure your PR is passing linting and testing before submitting. Run `make format`, `make lint` and `make test` to check this locally.

After you're done, someone will review your PR. They may suggest improvements. If no one reviews your PR within a few days, feel free to @-mention the same people again, as notifications can get lost.
-->
See contribution guidelines for more information on how to write/run tests, lint, etc:
https://github.com/langchain-ai/langchain/blob/master/.github/CONTRIBUTING.md

<!-- Remove if not applicable -->
If you're adding a new integration, please include:
1. a test for the integration, preferably unit tests that do not rely on network access,
2. an example notebook showing its use. It lives in the `docs/extras` directory.

Fixes # (issue)

## Before submitting

<!-- If you're adding a new integration, include an integration test and an example notebook showing its use! -->

## Who can review?

Community members can review the PR once tests pass. Tag maintainers/contributors who might be interested:

<!-- For a quicker response, figure out the right person to tag with @

@hwchase17 - project lead

Tracing / Callbacks
- @agola11

Async
- @agola11

DataLoaders
- @eyurtsev

Models
- @hwchase17
- @agola11

Agents / Tools / Toolkits
- @vowelparrot

VectorStores / Retrievers / Memory
- @dev2049

If no one reviews your PR within a few days, please @-mention one of @baskaryan, @eyurtsev, @hwchase17.
-->
91  .github/actions/poetry_setup/action.yml  vendored
@@ -15,62 +15,77 @@ inputs:
     description: Poetry version
     required: true

-  install-command:
-    description: Command run for installing dependencies
-    required: false
-    default: poetry install
-
   cache-key:
     description: Cache key to use for manual handling of caching
     required: true

   working-directory:
-    description: Directory to run install-command in
-    required: false
-    default: ""
+    description: Directory whose poetry.lock file should be cached
+    required: true

 runs:
   using: composite
   steps:
     - uses: actions/setup-python@v4
-      name: Setup python $${ inputs.python-version }}
+      name: Setup python ${{ inputs.python-version }}
       with:
         python-version: ${{ inputs.python-version }}

     - uses: actions/cache@v3
-      id: cache-pip
-      name: Cache Pip ${{ inputs.python-version }}
+      id: cache-bin-poetry
+      name: Cache Poetry binary - Python ${{ inputs.python-version }}
       env:
-        SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
+        SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
       with:
         path: |
+          /opt/pipx/venvs/poetry
+        # This step caches the poetry installation, so make sure it's keyed on the poetry version as well.
+        key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }}
+
+    - name: Refresh shell hashtable and fixup softlinks
+      if: steps.cache-bin-poetry.outputs.cache-hit == 'true'
+      shell: bash
+      env:
+        POETRY_VERSION: ${{ inputs.poetry-version }}
+        PYTHON_VERSION: ${{ inputs.python-version }}
+      run: |
+        set -eux
+
+        # Refresh the shell hashtable, to ensure correct `which` output.
+        hash -r
+
+        # `actions/cache@v3` doesn't always seem able to correctly unpack softlinks.
+        # Delete and recreate the softlinks pipx expects to have.
+        rm /opt/pipx/venvs/poetry/bin/python
+        cd /opt/pipx/venvs/poetry/bin
+        ln -s "$(which "python$PYTHON_VERSION")" python
+        chmod +x python
+        cd /opt/pipx_bin/
+        ln -s /opt/pipx/venvs/poetry/bin/poetry poetry
+        chmod +x poetry
+
+        # Ensure everything got set up correctly.
+        /opt/pipx/venvs/poetry/bin/python --version
+        /opt/pipx_bin/poetry --version
+
+    - name: Install poetry
+      if: steps.cache-bin-poetry.outputs.cache-hit != 'true'
+      shell: bash
+      env:
+        POETRY_VERSION: ${{ inputs.poetry-version }}
+        PYTHON_VERSION: ${{ inputs.python-version }}
+      run: pipx install "poetry==$POETRY_VERSION" --python "python$PYTHON_VERSION" --verbose
+
+    - name: Restore pip and poetry cached dependencies
+      uses: actions/cache@v3
+      env:
+        SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
+        WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
+      with:
+        path: |
           ~/.cache/pip
-        key: pip-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}
-
-    - run: pipx install poetry==${{ inputs.poetry-version }} --python python${{ inputs.python-version }}
-      shell: bash
-
-    - name: Check Poetry File
-      shell: bash
-      run: |
-        poetry check
-
-    - name: Check lock file
-      shell: bash
-      run: |
-        poetry lock --check
-
-    - uses: actions/cache@v3
-      id: cache-poetry
-      env:
-        SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
-      with:
-        path: |
           ~/.cache/pypoetry/virtualenvs
           ~/.cache/pypoetry/cache
           ~/.cache/pypoetry/artifacts
-        key: poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles('poetry.lock') }}
-
-    - run: ${{ inputs.install-command }}
-      working-directory: ${{ inputs.working-directory }}
-      shell: bash
+          ${{ env.WORKDIR }}/.venv
+        key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/**/poetry.lock', env.WORKDIR)) }}
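The `py-deps-...` cache key above strings together OS, CPU architecture, Python version, Poetry version, the caller-supplied `cache-key` input, and a hash of `poetry.lock`, so a change to any one component invalidates the cache. A minimal Python sketch of that composition (the helper below is illustrative, not part of the action):

import hashlib
import platform
from pathlib import Path


def py_deps_cache_key(python_version: str, poetry_version: str,
                      cache_key: str, lockfile: Path) -> str:
    # Any component change (OS, arch, interpreter, Poetry, caller key,
    # or lockfile contents) produces a new key and busts the cache.
    lock_hash = hashlib.sha256(lockfile.read_bytes()).hexdigest()[:16]
    return (f"py-deps-{platform.system()}-{platform.machine()}"
            f"-py-{python_version}-poetry-{poetry_version}"
            f"-{cache_key}-{lock_hash}")


print(py_deps_cache_key("3.11", "1.6.1", "core", Path("poetry.lock")))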
606  .github/tools/git-restore-mtime  vendored  Executable file
@@ -0,0 +1,606 @@
#!/usr/bin/env python3
#
# git-restore-mtime - Change mtime of files based on commit date of last change
#
# Copyright (C) 2012 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. See <http://www.gnu.org/licenses/gpl.html>
#
# Source: https://github.com/MestreLion/git-tools
# Version: July 13, 2023 (commit hash 5f832e72453e035fccae9d63a5056918d64476a2)
"""
Change the modification time (mtime) of files in work tree, based on the
date of the most recent commit that modified the file, including renames.

Ignores untracked files and uncommitted deletions, additions and renames, and
by default modifications too.
---
Useful prior to generating release tarballs, so each file is archived with a
date that is similar to the date when the file was actually last modified,
assuming the actual modification date and its commit date are close.
"""

# TODO:
# - Add -z on git whatchanged/ls-files, so we don't deal with filename decoding
# - When Python is bumped to 3.7, use text instead of universal_newlines on subprocess
# - Update "Statistics for some large projects" with modern hardware and repositories.
# - Create a README.md for git-restore-mtime alone. It deserves extensive documentation
#   - Move Statistics there
# - See git-extras as a good example on project structure and documentation

# FIXME:
# - When current dir is outside the worktree, e.g. using --work-tree, `git ls-files`
#   assume any relative pathspecs are to worktree root, not the current dir. As such,
#   relative pathspecs may not work.
# - Renames are tricky:
#   - R100 should not change mtime, but original name is not on filelist. Should
#     track renames until a valid (A, M) mtime found and then set on current name.
#   - Should set mtime for both current and original directories.
#   - Check mode changes with unchanged blobs?
# - Check file (A, D) for the directory mtime is not sufficient:
#   - Renames also change dir mtime, unless rename was on a parent dir
#   - If most recent change of all files in a dir was a Modification (M),
#     dir might not be touched at all.
#   - Dirs containing only subdirectories but no direct files will also
#     not be touched. They're files' [grand]parent dir, but never their dirname().
#   - Some solutions:
#     - After files done, perform some dir processing for missing dirs, finding latest
#       file (A, D, R)
#     - Simple approach: dir mtime is the most recent child (dir or file) mtime
#     - Use a virtual concept of "created at most at" to fill missing info, bubble up
#       to parents and grandparents
#   - When handling [grand]parent dirs, stay inside <pathspec>
# - Better handling of merge commits. `-m` is plain *wrong*. `-c/--cc` is perfect, but
#   painfully slow. First pass without merge commits is not accurate. Maybe add a new
#   `--accurate` mode for `--cc`?

if __name__ != "__main__":
    raise ImportError("{} should not be used as a module.".format(__name__))

import argparse
import datetime
import logging
import os.path
import shlex
import signal
import subprocess
import sys
import time

__version__ = "2022.12+dev"

# Update symlinks only if the platform supports not following them
UPDATE_SYMLINKS = bool(os.utime in getattr(os, 'supports_follow_symlinks', []))

# Call os.path.normpath() only if not in a POSIX platform (Windows)
NORMALIZE_PATHS = (os.path.sep != '/')

# How many files to process in each batch when re-trying merge commits
STEPMISSING = 100

# (Extra) keywords for the os.utime() call performed by touch()
UTIME_KWS = {} if not UPDATE_SYMLINKS else {'follow_symlinks': False}


# Command-line interface ######################################################

def parse_args():
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n---')[0])

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--quiet', '-q', dest='loglevel',
                       action="store_const", const=logging.WARNING, default=logging.INFO,
                       help="Suppress informative messages and summary statistics.")
    group.add_argument('--verbose', '-v', action="count", help="""
        Print additional information for each processed file.
        Specify twice to further increase verbosity.
        """)

    parser.add_argument('--cwd', '-C', metavar="DIRECTORY", help="""
        Run as if %(prog)s was started in directory %(metavar)s.
        This affects how --work-tree, --git-dir and PATHSPEC arguments are handled.
        See 'man 1 git' or 'git --help' for more information.
        """)

    parser.add_argument('--git-dir', dest='gitdir', metavar="GITDIR", help="""
        Path to the git repository, by default auto-discovered by searching
        the current directory and its parents for a .git/ subdirectory.
        """)

    parser.add_argument('--work-tree', dest='workdir', metavar="WORKTREE", help="""
        Path to the work tree root, by default the parent of GITDIR if it's
        automatically discovered, or the current directory if GITDIR is set.
        """)

    parser.add_argument('--force', '-f', default=False, action="store_true", help="""
        Force updating files with uncommitted modifications.
        Untracked files and uncommitted deletions, renames and additions are
        always ignored.
        """)

    parser.add_argument('--merge', '-m', default=False, action="store_true", help="""
        Include merge commits.
        Leads to more recent times and more files per commit, thus with the same
        time, which may or may not be what you want.
        Including merge commits may lead to fewer commits being evaluated as files
        are found sooner, which can improve performance, sometimes substantially.
        But as merge commits are usually huge, processing them may also take longer.
        By default, merge commits are only used for files missing from regular commits.
        """)

    parser.add_argument('--first-parent', default=False, action="store_true", help="""
        Consider only the first parent, the "main branch", when evaluating merge commits.
        Only effective when merge commits are processed, either when --merge is
        used or when finding missing files after the first regular log search.
        See --skip-missing.
        """)

    parser.add_argument('--skip-missing', '-s', dest="missing", default=True,
                        action="store_false", help="""
        Do not try to find missing files.
        If merge commits were not evaluated with --merge and some files were
        not found in regular commits, by default %(prog)s searches for these
        files again in the merge commits.
        This option disables this retry, so files found only in merge commits
        will not have their timestamp updated.
        """)

    parser.add_argument('--no-directories', '-D', dest='dirs', default=True,
                        action="store_false", help="""
        Do not update directory timestamps.
        By default, use the time of its most recently created, renamed or deleted file.
        Note that just modifying a file will NOT update its directory time.
        """)

    parser.add_argument('--test', '-t', default=False, action="store_true",
                        help="Test run: do not actually update any file timestamp.")

    parser.add_argument('--commit-time', '-c', dest='commit_time', default=False,
                        action='store_true', help="Use commit time instead of author time.")

    parser.add_argument('--oldest-time', '-o', dest='reverse_order', default=False,
                        action='store_true', help="""
        Update times based on the oldest, instead of the most recent commit of a file.
        This reverses the order in which the git log is processed to emulate a
        file "creation" date. Note this will be inaccurate for files deleted and
        re-created at later dates.
        """)

    parser.add_argument('--skip-older-than', metavar='SECONDS', type=int, help="""
        Ignore files that are currently older than %(metavar)s.
        Useful in workflows that assume such files already have a correct timestamp,
        as it may improve performance by processing fewer files.
        """)

    parser.add_argument('--skip-older-than-commit', '-N', default=False,
                        action='store_true', help="""
        Ignore files older than the timestamp it would be updated to.
        Such files may be considered "original", likely in the author's repository.
        """)

    parser.add_argument('--unique-times', default=False, action="store_true", help="""
        Set the microseconds to a unique value per commit.
        Allows telling apart changes that would otherwise have identical timestamps,
        as git's time accuracy is in seconds.
        """)

    parser.add_argument('pathspec', nargs='*', metavar='PATHSPEC', help="""
        Only modify paths matching %(metavar)s, relative to current directory.
        By default, update all but untracked files and submodules.
        """)

    parser.add_argument('--version', '-V', action='version',
                        version='%(prog)s version {version}'.format(version=get_version()))

    args_ = parser.parse_args()
    if args_.verbose:
        args_.loglevel = max(logging.TRACE, logging.DEBUG // args_.verbose)
    args_.debug = args_.loglevel <= logging.DEBUG
    return args_


def get_version(version=__version__):
    if not version.endswith('+dev'):
        return version
    try:
        cwd = os.path.dirname(os.path.realpath(__file__))
        return Git(cwd=cwd, errors=False).describe().lstrip('v')
    except Git.Error:
        return '-'.join((version, "unknown"))


# Helper functions ############################################################

def setup_logging():
    """Add TRACE logging level and corresponding method, return the root logger"""
    logging.TRACE = TRACE = logging.DEBUG // 2
    logging.Logger.trace = lambda _, m, *a, **k: _.log(TRACE, m, *a, **k)
    return logging.getLogger()


def normalize(path):
    r"""Normalize paths from git, handling non-ASCII characters.

    Git stores paths as UTF-8 normalization form C.
    If path contains non-ASCII or non-printable characters, git outputs the UTF-8
    in octal-escaped notation, escaping double-quotes and backslashes, and then
    double-quoting the whole path.
    https://git-scm.com/docs/git-config#Documentation/git-config.txt-corequotePath

    This function reverts this encoding, so:
    normalize(r'"Back\\slash_double\"quote_a\303\247a\303\255"') =>
    r'Back\slash_double"quote_açaí')

    Paths with invalid UTF-8 encoding, such as single 0x80-0xFF bytes (e.g, from
    Latin1/Windows-1251 encoding) are decoded using surrogate escape, the same
    method used by Python for filesystem paths. So 0xE6 ("æ" in Latin1, r'\\346'
    from Git) is decoded as "\udce6". See https://peps.python.org/pep-0383/ and
    https://vstinner.github.io/painful-history-python-filesystem-encoding.html

    Also see notes on `windows/non-ascii-paths.txt` about path encodings on
    non-UTF-8 platforms and filesystems.
    """
    if path and path[0] == '"':
        # Python 2: path = path[1:-1].decode("string-escape")
        # Python 3: https://stackoverflow.com/a/46650050/624066
        path = (path[1:-1]  # Remove enclosing double quotes
                .encode('latin1')  # Convert to bytes, required by 'unicode-escape'
                .decode('unicode-escape')  # Perform the actual octal-escaping decode
                .encode('latin1')  # 1:1 mapping to bytes, UTF-8 encoded
                .decode('utf8', 'surrogateescape'))  # Decode from UTF-8
    if NORMALIZE_PATHS:
        # Make sure the slash matches the OS; for Windows we need a backslash
        path = os.path.normpath(path)
    return path

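# Illustrative examples (not in the upstream file):
#     normalize('libs/langchain/pyproject.toml')  -> unchanged: not quoted
#     normalize(r'"a\303\247a\303\255.txt"')      -> 'açaí.txt'
# Only quoted paths (first char '"') go through the escape-decoding chain
# above; plain ASCII paths are returned as-is.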
def dummy(*_args, **_kwargs):
    """No-op function used in dry-run tests"""


def touch(path, mtime):
    """The actual mtime update"""
    os.utime(path, (mtime, mtime), **UTIME_KWS)


def touch_ns(path, mtime_ns):
    """The actual mtime update, using nanoseconds for unique timestamps"""
    os.utime(path, None, ns=(mtime_ns, mtime_ns), **UTIME_KWS)


def isodate(secs: int):
    # time.localtime() accepts floats, but discards fractional part
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(secs))


def isodate_ns(ns: int):
    # for integers fromtimestamp() is equivalent and ~16% slower than isodate()
    return datetime.datetime.fromtimestamp(ns / 1000000000).isoformat(sep=' ')


def get_mtime_ns(secs: int, idx: int):
    # Time resolution for filesystems and functions:
    # ext-4 and other POSIX filesystems: 1 nanosecond
    # NTFS (Windows default): 100 nanoseconds
    # datetime.datetime() (due to 64-bit float epoch): 1 microsecond
    us = idx % 1000000  # 10**6
    return 1000 * (1000000 * secs + us)

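# Worked example for get_mtime_ns() (illustrative, not exercised by the script):
# for a commit at secs=1700000000 that is the 42nd commit processed,
#     us = 42 % 1000000                        -> 42
#     ns = 1000 * (1000000 * 1700000000 + 42)  -> 1_700_000_000_000_042_000
# so each commit index lands in a distinct microsecond slot, restoring
# uniqueness within git's 1-second timestamp resolution.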

def get_mtime_path(path):
    return os.path.getmtime(path)


# Git class and parse_log(), the heart of the script ##########################

class Git:
    def __init__(self, workdir=None, gitdir=None, cwd=None, errors=True):
        self.gitcmd = ['git']
        self.errors = errors
        self._proc = None
        if workdir: self.gitcmd.extend(('--work-tree', workdir))
        if gitdir:  self.gitcmd.extend(('--git-dir', gitdir))
        if cwd:     self.gitcmd.extend(('-C', cwd))
        self.workdir, self.gitdir = self._get_repo_dirs()

    def ls_files(self, paths: list = None):
        return (normalize(_) for _ in self._run('ls-files --full-name', paths))

    def ls_dirty(self, force=False):
        return (normalize(_[3:].split(' -> ', 1)[-1])
                for _ in self._run('status --porcelain')
                if _[:2] != '??' and (not force or (_[0] in ('R', 'A')
                                                    or _[1] == 'D')))

    def log(self, merge=False, first_parent=False, commit_time=False,
            reverse_order=False, paths: list = None):
        cmd = 'whatchanged --pretty={}'.format('%ct' if commit_time else '%at')
        if merge:         cmd += ' -m'
        if first_parent:  cmd += ' --first-parent'
        if reverse_order: cmd += ' --reverse'
        return self._run(cmd, paths)

    def describe(self):
        return self._run('describe --tags', check=True)[0]

    def terminate(self):
        if self._proc is None:
            return
        try:
            self._proc.terminate()
        except OSError:
            # Avoid errors on OpenBSD
            pass

    def _get_repo_dirs(self):
        return (os.path.normpath(_) for _ in
                self._run('rev-parse --show-toplevel --absolute-git-dir', check=True))

    def _run(self, cmdstr: str, paths: list = None, output=True, check=False):
        cmdlist = self.gitcmd + shlex.split(cmdstr)
        if paths:
            cmdlist.append('--')
            cmdlist.extend(paths)
        popen_args = dict(universal_newlines=True, encoding='utf8')
        if not self.errors:
            popen_args['stderr'] = subprocess.DEVNULL
        log.trace("Executing: %s", ' '.join(cmdlist))
        if not output:
            return subprocess.call(cmdlist, **popen_args)
        if check:
            try:
                stdout: str = subprocess.check_output(cmdlist, **popen_args)
                return stdout.splitlines()
            except subprocess.CalledProcessError as e:
                raise self.Error(e.returncode, e.cmd, e.output, e.stderr)
        self._proc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE, **popen_args)
        return (_.rstrip() for _ in self._proc.stdout)

    def __del__(self):
        self.terminate()

    class Error(subprocess.CalledProcessError):
        """Error from git executable"""


def parse_log(filelist, dirlist, stats, git, merge=False, filterlist=None):
    mtime = 0
    datestr = isodate(0)
    for line in git.log(
        merge,
        args.first_parent,
        args.commit_time,
        args.reverse_order,
        filterlist
    ):
        stats['loglines'] += 1

        # Blank line between Date and list of files
        if not line:
            continue

        # Date line
        if line[0] != ':':  # Faster than `not line.startswith(':')`
            stats['commits'] += 1
            mtime = int(line)
            if args.unique_times:
                mtime = get_mtime_ns(mtime, stats['commits'])
            if args.debug:
                datestr = isodate(mtime)
            continue

        # File line: three tokens if it describes a renaming, otherwise two
        tokens = line.split('\t')

        # Possible statuses:
        # M: Modified (content changed)
        # A: Added (created)
        # D: Deleted
        # T: Type changed: to/from regular file, symlinks, submodules
        # R099: Renamed (moved), with % of unchanged content. 100 = pure rename
        # Not possible in log: C=Copied, U=Unmerged, X=Unknown, B=pairing Broken
        status = tokens[0].split(' ')[-1]
        file = tokens[-1]

        # Handles non-ASCII chars and OS path separator
        file = normalize(file)

        def do_file():
            if args.skip_older_than_commit and get_mtime_path(file) <= mtime:
                stats['skip'] += 1
                return
            if args.debug:
                log.debug("%d\t%d\t%d\t%s\t%s",
                          stats['loglines'], stats['commits'], stats['files'],
                          datestr, file)
            try:
                touch(os.path.join(git.workdir, file), mtime)
                stats['touches'] += 1
            except Exception as e:
                log.error("ERROR: %s: %s", e, file)
                stats['errors'] += 1

        def do_dir():
            if args.debug:
                log.debug("%d\t%d\t-\t%s\t%s",
                          stats['loglines'], stats['commits'],
                          datestr, "{}/".format(dirname or '.'))
            try:
                touch(os.path.join(git.workdir, dirname), mtime)
                stats['dirtouches'] += 1
            except Exception as e:
                log.error("ERROR: %s: %s", e, dirname)
                stats['direrrors'] += 1

        if file in filelist:
            stats['files'] -= 1
            filelist.remove(file)
            do_file()

        if args.dirs and status in ('A', 'D'):
            dirname = os.path.dirname(file)
            if dirname in dirlist:
                dirlist.remove(dirname)
                do_dir()

        # All files done?
        if not stats['files']:
            git.terminate()
            return

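# Example of the `git whatchanged --pretty=%at` stream parsed above
# (illustrative sample with abbreviated hashes, not part of the upstream file):
#
#     1699999999                                        <- date line (no leading ':')
#                                                       <- blank separator
#     :100644 100644 abc1234 def5678 M<TAB>libs/langchain/pyproject.toml
#     :100644 100644 1111111 2222222 R100<TAB>old/name.py<TAB>new/name.py
#
# For the rename line, tokens[0].split(' ')[-1] -> 'R100' and tokens[-1] is
# the *new* name, which is why pure renames are tricky (see FIXME above).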

# Main Logic ##################################################################

def main():
    start = time.time()  # yes, Wall time. CPU time is not realistic for users.
    stats = {_: 0 for _ in ('loglines', 'commits', 'touches', 'skip', 'errors',
                            'dirtouches', 'direrrors')}

    logging.basicConfig(level=args.loglevel, format='%(message)s')
    log.trace("Arguments: %s", args)

    # First things first: Where and Who are we?
    if args.cwd:
        log.debug("Changing directory: %s", args.cwd)
        try:
            os.chdir(args.cwd)
        except OSError as e:
            log.critical(e)
            return e.errno
    # Using both os.chdir() and `git -C` is redundant, but might prevent side effects
    # `git -C` alone could be enough if we make sure that:
    # - all paths, including args.pathspec, are processed by git: ls-files, rev-parse
    # - touch() / os.utime() path argument is always prepended with git.workdir
    try:
        git = Git(workdir=args.workdir, gitdir=args.gitdir, cwd=args.cwd)
    except Git.Error as e:
        # Not in a git repository, and git already informed user on stderr. So we just...
        return e.returncode

    # Get the files managed by git and build file list to be processed
    if UPDATE_SYMLINKS and not args.skip_older_than:
        filelist = set(git.ls_files(args.pathspec))
    else:
        filelist = set()
        for path in git.ls_files(args.pathspec):
            fullpath = os.path.join(git.workdir, path)

            # Symlink (to file, to dir or broken - git handles the same way)
            if not UPDATE_SYMLINKS and os.path.islink(fullpath):
                log.warning("WARNING: Skipping symlink, no OS support for updates: %s",
                            path)
                continue

            # skip files which are older than given threshold
            if (args.skip_older_than
                    and start - get_mtime_path(fullpath) > args.skip_older_than):
                continue

            # Always add files relative to worktree root
            filelist.add(path)

    # If --force, silently ignore uncommitted deletions (not in the filesystem)
    # and renames / additions (will not be found in log anyway)
    if args.force:
        filelist -= set(git.ls_dirty(force=True))
    # Otherwise, ignore any dirty files
    else:
        dirty = set(git.ls_dirty())
        if dirty:
            log.warning("WARNING: Modified files in the working directory were ignored."
                        "\nTo include such files, commit your changes or use --force.")
            filelist -= dirty

    # Build dir list to be processed
    dirlist = set(os.path.dirname(_) for _ in filelist) if args.dirs else set()

    stats['totalfiles'] = stats['files'] = len(filelist)
    log.info("{0:,} files to be processed in work dir".format(stats['totalfiles']))

    if not filelist:
        # Nothing to do. Exit silently and without errors, just like git does
        return

    # Process the log until all files are 'touched'
    log.debug("Line #\tLog #\tF.Left\tModification Time\tFile Name")
    parse_log(filelist, dirlist, stats, git, args.merge, args.pathspec)

    # Missing files
    if filelist:
        # Try to find them in merge logs, if not done already
        # (usually HUGE, thus MUCH slower!)
        if args.missing and not args.merge:
            filterlist = list(filelist)
            missing = len(filterlist)
            log.info("{0:,} files not found in log, trying merge commits".format(missing))
            for i in range(0, missing, STEPMISSING):
                parse_log(filelist, dirlist, stats, git,
                          merge=True, filterlist=filterlist[i:i + STEPMISSING])

        # Still missing some?
        for file in filelist:
            log.warning("WARNING: not found in the log: %s", file)

    # Final statistics
    # Suggestion: use git-log --before=mtime to brag about skipped log entries
    def log_info(msg, *a, width=13):
        ifmt = '{:%d,}' % (width,)  # not using 'n' for consistency with ffmt
        ffmt = '{:%d,.2f}' % (width,)
        # %-formatting lacks a thousand separator, must pre-render with .format()
        log.info(msg.replace('%d', ifmt).replace('%f', ffmt).format(*a))

    log_info(
        "Statistics:\n"
        "%f seconds\n"
        "%d log lines processed\n"
        "%d commits evaluated",
        time.time() - start, stats['loglines'], stats['commits'])

    if args.dirs:
        if stats['direrrors']: log_info("%d directory update errors", stats['direrrors'])
        log_info("%d directories updated", stats['dirtouches'])

    if stats['touches'] != stats['totalfiles']:
        log_info("%d files", stats['totalfiles'])
    if stats['skip']:   log_info("%d files skipped", stats['skip'])
    if stats['files']:  log_info("%d files missing", stats['files'])
    if stats['errors']: log_info("%d file update errors", stats['errors'])

    log_info("%d files updated", stats['touches'])

    if args.test:
        log.info("TEST RUN - No files modified!")


# Keep only essential, global assignments here. Any other logic must be in main()
log = setup_logging()
args = parse_args()

# Set the actual touch() and other functions based on command-line arguments
if args.unique_times:
    touch = touch_ns
    isodate = isodate_ns

# Make sure this is always set last to ensure --test behaves as intended
if args.test:
    touch = dummy

# UI done, it's showtime!
try:
    sys.exit(main())
except KeyboardInterrupt:
    log.info("\nAborting")
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    os.kill(os.getpid(), signal.SIGINT)
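A sketch of how the tool might be driven from a Python CI step (the dry-run-then-apply pattern follows the script's own --test flag; this diff does not show the repository invoking it this way):

import subprocess

# Dry-run first: --test prints what would change without touching any file.
tool = ".github/tools/git-restore-mtime"
subprocess.run([tool, "--test"], check=True)
# Then apply for real, e.g. before packaging a release tarball.
subprocess.run([tool], check=True)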
57  .github/workflows/_compile_integration_test.yml  vendored  Normal file
@@ -0,0 +1,57 @@
name: compile-integration-test

on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"

env:
  POETRY_VERSION: "1.6.1"

jobs:
  build:
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: Python ${{ matrix.python-version }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: compile-integration

      - name: Install integration dependencies
        shell: bash
        run: poetry install --with=test_integration

      - name: Check integration tests compile
        shell: bash
        run: poetry run pytest -m compile tests/integration_tests

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
93  .github/workflows/_lint.yml  vendored  Normal file
@@ -0,0 +1,93 @@
name: lint

on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"
      langchain-location:
        required: false
        type: string
        description: "Relative path to the langchain library folder"

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}

  # This env var allows us to get inline annotations when ruff has complaints.
  RUFF_OUTPUT_FORMAT: github

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Only lint on the min and max supported Python versions.
        # It's extremely unlikely that there's a lint issue on any version in between
        # that doesn't show up on the min or max versions.
        #
        # GitHub rate-limits how many jobs can be running at any one time.
        # Starting new jobs is also relatively slow,
        # so linting on fewer versions makes CI faster.
        python-version:
          - "3.8"
          - "3.11"
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: lint-with-extras

      - name: Check Poetry File
        shell: bash
        working-directory: ${{ inputs.working-directory }}
        run: |
          poetry check

      - name: Check lock file
        shell: bash
        working-directory: ${{ inputs.working-directory }}
        run: |
          poetry lock --check

      - name: Install dependencies
        # Also installs dev/lint/test/typing dependencies, to ensure we have
        # type hints for as many of our libraries as possible.
        # This helps catch errors that require dependencies to be spotted, for example:
        # https://github.com/langchain-ai/langchain/pull/10249/files#diff-935185cd488d015f026dcd9e19616ff62863e8cde8c0bee70318d3ccbca98341
        #
        # If you change this configuration, make sure to change the `cache-key`
        # in the `poetry_setup` action above to stop using the old cache.
        # It doesn't matter how you change it, any change will cause a cache-bust.
        working-directory: ${{ inputs.working-directory }}
        run: |
          poetry install --with dev,lint,test,typing

      - name: Install langchain editable
        working-directory: ${{ inputs.working-directory }}
        if: ${{ inputs.langchain-location }}
        env:
          LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
        run: |
          pip install -e "$LANGCHAIN_LOCATION"

      - name: Get .mypy_cache to speed up mypy
        uses: actions/cache@v3
        env:
          SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
        with:
          path: |
            ${{ env.WORKDIR }}/.mypy_cache
          key: mypy-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}

      - name: Analysing the code with our lint
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint
93  .github/workflows/_pydantic_compatibility.yml  vendored  Normal file
@@ -0,0 +1,93 @@
name: pydantic v1/v2 compatibility

on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"

env:
  POETRY_VERSION: "1.6.1"

jobs:
  build:
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: Pydantic v1/v2 compatibility - Python ${{ matrix.python-version }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: pydantic-cross-compat

      - name: Install dependencies
        shell: bash
        run: poetry install

      - name: Install the opposite major version of pydantic
        # If normal tests use pydantic v1, here we'll use v2, and vice versa.
        shell: bash
        run: |
          # Determine the major part of pydantic version
          REGULAR_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)

          if [[ "$REGULAR_VERSION" == "1" ]]; then
            PYDANTIC_DEP=">=2.1,<3"
            TEST_WITH_VERSION="2"
          elif [[ "$REGULAR_VERSION" == "2" ]]; then
            PYDANTIC_DEP="<2"
            TEST_WITH_VERSION="1"
          else
            echo "Unexpected pydantic major version '$REGULAR_VERSION', cannot determine which version to use for cross-compatibility test."
            exit 1
          fi

          # Install via `pip` instead of `poetry add` to avoid changing lockfile,
          # which would prevent caching from working: the cache would get saved
          # to a different key than where it gets loaded from.
          poetry run pip install "pydantic${PYDANTIC_DEP}"

          # Ensure that the correct pydantic is installed now.
          echo "Checking pydantic version... Expecting ${TEST_WITH_VERSION}"

          # Determine the major part of pydantic version
          CURRENT_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)

          # Check that the major part of pydantic version is as expected, if not
          # raise an error
          if [[ "$CURRENT_VERSION" != "$TEST_WITH_VERSION" ]]; then
            echo "Error: expected pydantic version ${TEST_WITH_VERSION} to have been installed, but found: ${CURRENT_VERSION}"
            exit 1
          fi
          echo "Found pydantic version ${CURRENT_VERSION}, as expected"
      - name: Run pydantic compatibility tests
        shell: bash
        run: make test

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
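The version flip above is driven entirely by the major component of `pydantic.__version__`; the same selection logic in plain Python (a minimal sketch, not part of the workflow):

import pydantic

# Equivalent of the workflow's `cut -d. -f1` plus the if/elif/else selection:
# whichever major version is installed, pick the opposite one for the test run.
major = pydantic.__version__.split(".")[0]
if major == "1":
    pydantic_dep, test_with = ">=2.1,<3", "2"
elif major == "2":
    pydantic_dep, test_with = "<2", "1"
else:
    raise SystemExit(f"Unexpected pydantic major version {major!r}")
print(f"poetry run pip install 'pydantic{pydantic_dep}'  # then expect major {test_with}")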
202  .github/workflows/_release.yml  vendored  Normal file
@@ -0,0 +1,202 @@
name: release

on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"

env:
  PYTHON_VERSION: "3.10"
  POETRY_VERSION: "1.6.1"

jobs:
  build:
    if: github.ref == 'refs/heads/master'
    runs-on: ubuntu-latest

    outputs:
      pkg-name: ${{ steps.check-version.outputs.pkg-name }}
      version: ${{ steps.check-version.outputs.version }}

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: release

      # We want to keep this build stage *separate* from the release stage,
      # so that there's no sharing of permissions between them.
      # The release stage has trusted publishing and GitHub repo contents write access,
      # and we want to keep the scope of that access limited just to the release job.
      # Otherwise, a malicious `build` step (e.g. via a compromised dependency)
      # could get access to our GitHub or PyPI credentials.
      #
      # Per the trusted publishing GitHub Action:
      # > It is strongly advised to separate jobs for building [...]
      # > from the publish job.
      # https://github.com/pypa/gh-action-pypi-publish#non-goals
      - name: Build project for distribution
        run: poetry build
        working-directory: ${{ inputs.working-directory }}

      - name: Upload build
        uses: actions/upload-artifact@v3
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/

      - name: Check Version
        id: check-version
        shell: bash
        working-directory: ${{ inputs.working-directory }}
        run: |
          echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
          echo version="$(poetry version --short)" >> $GITHUB_OUTPUT

  test-pypi-publish:
    needs:
      - build
    uses:
      ./.github/workflows/_test_release.yml
    with:
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  pre-release-checks:
    needs:
      - build
      - test-pypi-publish
    runs-on: ubuntu-latest
    steps:
      # We explicitly *don't* set up caching here. This ensures our tests are
      # maximally sensitive to catching breakage.
      #
      # For example, here's a way that caching can cause a falsely-passing test:
      # - Make the langchain package manifest no longer list a dependency package
      #   as a requirement. This means it won't be installed by `pip install`,
      #   and attempting to use it would cause a crash.
      # - That dependency used to be required, so it may have been cached.
      #   When restoring the venv packages from cache, that dependency gets included.
      # - Tests pass, because the dependency is present even though it wasn't specified.
      # - The package is published, and it breaks on the missing dependency when
      #   used in the real world.
      - uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Test published package
        shell: bash
        env:
          PKG_NAME: ${{ needs.build.outputs.pkg-name }}
          VERSION: ${{ needs.build.outputs.version }}
        # Here we use:
        # - The default regular PyPI index as the *primary* index, meaning
        #   that it takes priority (https://pypi.org/simple)
        # - The test PyPI index as an extra index, so that any dependencies that
        #   are not found on test PyPI can be resolved and installed anyway.
        #   (https://test.pypi.org/simple). This will include the PKG_NAME==VERSION
        #   package because VERSION will not have been uploaded to regular PyPI yet.
        #
        # TODO: add more in-depth pre-publish tests after testing that importing works
        run: |
          pip install \
            --extra-index-url https://test.pypi.org/simple/ \
            "$PKG_NAME==$VERSION"

          # Replace all dashes in the package name with underscores,
          # since that's how Python imports packages with dashes in the name.
          IMPORT_NAME="$(echo "$PKG_NAME" | sed s/-/_/g)"

          python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))"

  publish:
    needs:
      - build
      - test-pypi-publish
      - pre-release-checks
    runs-on: ubuntu-latest
    permissions:
      # This permission is used for trusted publishing:
      # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/
      #
      # Trusted publishing has to also be configured on PyPI for each package:
      # https://docs.pypi.org/trusted-publishers/adding-a-publisher/
      id-token: write

    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: release

      - uses: actions/download-artifact@v3
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/

      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: ${{ inputs.working-directory }}/dist/
          verbose: true
          print-hash: true

  mark-release:
    needs:
      - build
      - test-pypi-publish
      - pre-release-checks
      - publish
    runs-on: ubuntu-latest
    permissions:
      # This permission is needed by `ncipollo/release-action` to
      # create the GitHub release.
      contents: write

    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: release

      - uses: actions/download-artifact@v3
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/

      - name: Create Release
        uses: ncipollo/release-action@v1
        if: ${{ inputs.working-directory == 'libs/langchain' }}
        with:
          artifacts: "dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: true
          tag: v${{ needs.build.outputs.version }}
          commit: master
62  .github/workflows/_release_docker.yml  vendored  Normal file
@@ -0,0 +1,62 @@
name: release_docker

on:
  workflow_call:
    inputs:
      dockerfile:
        required: true
        type: string
        description: "Path to the Dockerfile to build"
      image:
        required: true
        type: string
        description: "Name of the image to build"

env:
  TEST_TAG: ${{ inputs.image }}:test
  LATEST_TAG: ${{ inputs.image }}:latest

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Get git tag
        uses: actions-ecosystem/action-get-latest-tag@v1
        id: get-latest-tag
      - name: Set docker tag
        env:
          VERSION: ${{ steps.get-latest-tag.outputs.tag }}
        run: |
          echo "VERSION_TAG=${{ inputs.image }}:${VERSION#v}" >> $GITHUB_ENV
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build for Test
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ${{ inputs.dockerfile }}
          load: true
          tags: ${{ env.TEST_TAG }}
      - name: Test
        run: |
          docker run --rm ${{ env.TEST_TAG }} python -c "import langchain"
      - name: Build and Push to Docker Hub
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ${{ inputs.dockerfile }}
          # We can only build for the intersection of platforms supported by
          # QEMU and base python image, for now build only for
          # linux/amd64 and linux/arm64
          platforms: linux/amd64,linux/arm64
          tags: ${{ env.LATEST_TAG }},${{ env.VERSION_TAG }}
          push: true
57  .github/workflows/_test.yml  vendored  Normal file
@@ -0,0 +1,57 @@
name: test

on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"

env:
  POETRY_VERSION: "1.6.1"

jobs:
  build:
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: Python ${{ matrix.python-version }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: core

      - name: Install dependencies
        shell: bash
        run: poetry install

      - name: Run core tests
        shell: bash
        run: make test

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
95  .github/workflows/_test_release.yml  vendored  Normal file
@@ -0,0 +1,95 @@
name: test-release

on:
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"

env:
  POETRY_VERSION: "1.6.1"
  PYTHON_VERSION: "3.10"

jobs:
  build:
    if: github.ref == 'refs/heads/master'
    runs-on: ubuntu-latest

    outputs:
      pkg-name: ${{ steps.check-version.outputs.pkg-name }}
      version: ${{ steps.check-version.outputs.version }}

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: release

      # We want to keep this build stage *separate* from the release stage,
      # so that there's no sharing of permissions between them.
      # The release stage has trusted publishing and GitHub repo contents write access,
      # and we want to keep the scope of that access limited just to the release job.
      # Otherwise, a malicious `build` step (e.g. via a compromised dependency)
      # could get access to our GitHub or PyPI credentials.
      #
      # Per the trusted publishing GitHub Action:
      # > It is strongly advised to separate jobs for building [...]
      # > from the publish job.
      # https://github.com/pypa/gh-action-pypi-publish#non-goals
      - name: Build project for distribution
        run: poetry build
        working-directory: ${{ inputs.working-directory }}

      - name: Upload build
        uses: actions/upload-artifact@v3
        with:
          name: test-dist
          path: ${{ inputs.working-directory }}/dist/

      - name: Check Version
        id: check-version
        shell: bash
        working-directory: ${{ inputs.working-directory }}
        run: |
          echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
          echo version="$(poetry version --short)" >> $GITHUB_OUTPUT

  publish:
    needs:
      - build
    runs-on: ubuntu-latest
    permissions:
      # This permission is used for trusted publishing:
      # https://blog.pypi.org/posts/2023-04-20-introducing-trusted-publishers/
      #
      # Trusted publishing has to also be configured on PyPI for each package:
      # https://docs.pypi.org/trusted-publishers/adding-a-publisher/
      id-token: write

    steps:
      - uses: actions/checkout@v4

      - uses: actions/download-artifact@v3
        with:
          name: test-dist
          path: ${{ inputs.working-directory }}/dist/

      - name: Publish to test PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: ${{ inputs.working-directory }}/dist/
          verbose: true
          print-hash: true
          repository-url: https://test.pypi.org/legacy/

          # We overwrite any existing distributions with the same name and version.
          # This is *only for CI use* and is *extremely dangerous* otherwise!
          # https://github.com/pypa/gh-action-pypi-publish#tolerating-release-package-file-duplicates
          skip-existing: true
36  .github/workflows/codespell.yml  vendored  Normal file
@@ -0,0 +1,36 @@
---
name: Codespell

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]

permissions:
  contents: read

jobs:
  codespell:
    name: Check for spelling errors
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Dependencies
        run: |
          pip install toml

      - name: Extract Ignore Words List
        run: |
          # Use a Python script to extract the ignore words list from pyproject.toml
          python .github/workflows/extract_ignored_words_list.py
        id: extract_ignore_words

      - name: Codespell
        uses: codespell-project/actions-codespell@v2
        with:
          skip: guide_imports.json
          ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
35  .github/workflows/doc_lint.yml  vendored  Normal file
@@ -0,0 +1,35 @@
---
name: Docs, templates, cookbook lint

on:
  push:
    branches: [ master ]
  pull_request:
    paths:
      - 'docs/**'
      - 'templates/**'
      - 'cookbook/**'
      - '.github/workflows/_lint.yml'
      - '.github/workflows/doc_lint.yml'
  workflow_dispatch:

jobs:
  check:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Run import check
        run: |
          # We should not encourage imports directly from main init file
          # Except for hub
          git grep 'from langchain import' {docs/docs,templates,cookbook} | grep -vE 'from langchain import (hub)' && exit 1 || exit 0

  lint:
    uses:
      ./.github/workflows/_lint.yml
    with:
      working-directory: "."
    secrets: inherit
10  .github/workflows/extract_ignored_words_list.py  vendored  Normal file
@@ -0,0 +1,10 @@
import toml

pyproject_toml = toml.load("pyproject.toml")

# Extract the ignore words list (adjust the key as per your TOML structure)
ignore_words_list = (
    pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
)

print(f"::set-output name=ignore_words_list::{ignore_words_list}")
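Note that the `::set-output` workflow command used above was deprecated by GitHub in favor of appending to the file named by `$GITHUB_OUTPUT`. An equivalent sketch using the newer mechanism (not what this commit ships):

import os

import toml

pyproject_toml = toml.load("pyproject.toml")
ignore_words_list = (
    pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
)

# Append to the step's output file instead of printing a workflow command.
with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
    fh.write(f"ignore_words_list={ignore_words_list}\n")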
105  .github/workflows/langchain_ci.yml  vendored  Normal file
@@ -0,0 +1,105 @@
|
||||
---
|
||||
name: libs/langchain CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
paths:
|
||||
- '.github/actions/poetry_setup/action.yml'
|
||||
- '.github/tools/**'
|
||||
- '.github/workflows/_lint.yml'
|
||||
- '.github/workflows/_test.yml'
|
||||
- '.github/workflows/_pydantic_compatibility.yml'
|
||||
- '.github/workflows/langchain_ci.yml'
|
||||
- 'libs/*'
|
||||
- 'libs/langchain/**'
|
||||
workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI
|
||||
|
||||
# If another push to the same PR or branch happens while this workflow is still running,
|
||||
# cancel the earlier run in favor of the next run.
|
||||
#
|
||||
# There's no point in testing an outdated version of the code. GitHub only allows
|
||||
# a limited number of job runners to be active at the same time, so it's better to cancel
|
||||
# pointless jobs early so that more useful jobs can run sooner.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.6.1"
|
||||
WORKDIR: "libs/langchain"
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
uses:
|
||||
./.github/workflows/_lint.yml
|
||||
with:
|
||||
working-directory: libs/langchain
|
||||
secrets: inherit
|
||||
|
||||
test:
|
||||
uses:
|
||||
./.github/workflows/_test.yml
|
||||
with:
|
||||
working-directory: libs/langchain
|
||||
secrets: inherit
|
||||
|
||||
compile-integration-tests:
|
||||
uses:
|
||||
./.github/workflows/_compile_integration_test.yml
|
||||
with:
|
||||
working-directory: libs/langchain
|
||||
secrets: inherit
|
||||
|
||||
pydantic-compatibility:
|
||||
uses:
|
||||
./.github/workflows/_pydantic_compatibility.yml
|
||||
with:
|
||||
working-directory: libs/langchain
|
||||
secrets: inherit
|
||||
|
||||
extended-tests:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ${{ env.WORKDIR }}
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- "3.8"
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
name: Python ${{ matrix.python-version }} extended tests
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: libs/langchain
|
||||
cache-key: extended
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Running extended tests, installing dependencies with poetry..."
|
||||
poetry install -E extended_testing
|
||||
|
||||
- name: Run extended tests
|
||||
run: make extended_tests
|
||||
|
||||
- name: Ensure the tests did not create any additional files
|
||||
shell: bash
|
||||
run: |
|
||||
set -eu
|
||||
|
||||
STATUS="$(git status)"
|
||||
echo "$STATUS"
|
||||
|
||||
# grep will exit non-zero if the target message isn't found,
|
||||
# and `set -e` above will cause the step to fail.
|
||||
echo "$STATUS" | grep 'nothing to commit, working tree clean'
|
||||
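The clean-working-tree check above greps human-readable `git status` output. An equivalent sketch using `git status --porcelain`, whose output is empty exactly when the tree is clean (an alternative, not what the workflow actually runs):

```python
import subprocess
import sys

# Empty porcelain output means a clean working tree.
status = subprocess.run(
    ["git", "status", "--porcelain"], capture_output=True, text=True, check=True
).stdout

if status:
    print("Tests created or modified files:\n" + status)
    sys.exit(1)
```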
47 .github/workflows/langchain_cli_ci.yml vendored Normal file
@@ -0,0 +1,47 @@
---
name: libs/cli CI

on:
  push:
    branches: [ master ]
  pull_request:
    paths:
      - '.github/actions/poetry_setup/action.yml'
      - '.github/tools/**'
      - '.github/workflows/_lint.yml'
      - '.github/workflows/_test.yml'
      - '.github/workflows/_pydantic_compatibility.yml'
      - '.github/workflows/langchain_cli_ci.yml'
      - 'libs/cli/**'
      - 'libs/*'
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: "libs/cli"

jobs:
  lint:
    uses:
      ./.github/workflows/_lint.yml
    with:
      working-directory: libs/cli
      langchain-location: ../langchain
    secrets: inherit

  test:
    uses:
      ./.github/workflows/_test.yml
    with:
      working-directory: libs/cli
    secrets: inherit
13 .github/workflows/langchain_cli_release.yml vendored Normal file
@@ -0,0 +1,13 @@
---
name: libs/cli Release

on:
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_release.yml
    with:
      working-directory: libs/cli
    secrets: inherit
137 .github/workflows/langchain_experimental_ci.yml vendored Normal file
@@ -0,0 +1,137 @@
---
name: libs/experimental CI

on:
  push:
    branches: [ master ]
  pull_request:
    paths:
      - '.github/actions/poetry_setup/action.yml'
      - '.github/tools/**'
      - '.github/workflows/_lint.yml'
      - '.github/workflows/_test.yml'
      - '.github/workflows/langchain_experimental_ci.yml'
      - 'libs/*'
      - 'libs/experimental/**'
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: "libs/experimental"

jobs:
  lint:
    uses:
      ./.github/workflows/_lint.yml
    with:
      working-directory: libs/experimental
      langchain-location: ../langchain
    secrets: inherit

  test:
    uses:
      ./.github/workflows/_test.yml
    with:
      working-directory: libs/experimental
    secrets: inherit

  compile-integration-tests:
    uses:
      ./.github/workflows/_compile_integration_test.yml
    with:
      working-directory: libs/experimental
    secrets: inherit

  # It's possible that langchain-experimental works fine with the latest *published* langchain,
  # but is broken with the langchain on `master`.
  #
  # We want to catch situations like that *before* releasing a new langchain, hence this test.
  test-with-latest-langchain:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ${{ env.WORKDIR }}
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: test with unpublished langchain - Python ${{ matrix.python-version }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ env.WORKDIR }}
          cache-key: unpublished-langchain

      - name: Install dependencies
        shell: bash
        run: |
          echo "Running tests with unpublished langchain, installing dependencies with poetry..."
          poetry install

          echo "Editably installing langchain outside of poetry, to avoid messing up the lockfile..."
          poetry run pip install -e ../langchain

      - name: Run tests
        run: make test

  extended-tests:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ${{ env.WORKDIR }}
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: Python ${{ matrix.python-version }} extended tests
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: libs/experimental
          cache-key: extended

      - name: Install dependencies
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with poetry..."
          poetry install -E extended_testing

      - name: Run extended tests
        run: make extended_tests

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
13 .github/workflows/langchain_experimental_release.yml vendored Normal file
@@ -0,0 +1,13 @@
---
name: libs/experimental Release

on:
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_release.yml
    with:
      working-directory: libs/experimental
    secrets: inherit
13 .github/workflows/langchain_experimental_test_release.yml vendored Normal file
@@ -0,0 +1,13 @@
---
name: Experimental Test Release

on:
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_test_release.yml
    with:
      working-directory: libs/experimental
    secrets: inherit
27 .github/workflows/langchain_release.yml vendored Normal file
@@ -0,0 +1,27 @@
---
name: libs/langchain Release

on:
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_release.yml
    with:
      working-directory: libs/langchain
    secrets: inherit

  # N.B.: It's possible that PyPI doesn't make the new release visible / available
  # immediately after publishing. If that happens, the docker build might not
  # create a new docker image for the new release, since it won't see it.
  #
  # If this ends up being a problem, add a check to the end of the `_release.yml`
  # workflow that prevents the workflow from finishing until the new release
  # is visible and installable on PyPI.
  release-docker:
    needs:
      - release
    uses:
      ./.github/workflows/langchain_release_docker.yml
    secrets: inherit
14 .github/workflows/langchain_release_docker.yml vendored Normal file
@@ -0,0 +1,14 @@
---
name: docker/langchain/langchain Release

on:
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI
  workflow_call:  # Allows triggering from another workflow

jobs:
  release:
    uses: ./.github/workflows/_release_docker.yml
    with:
      dockerfile: docker/Dockerfile.base
      image: langchain/langchain
    secrets: inherit
13 .github/workflows/langchain_test_release.yml vendored Normal file
@@ -0,0 +1,13 @@
---
name: Test Release

on:
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_test_release.yml
    with:
      working-directory: libs/langchain
    secrets: inherit
38 .github/workflows/linkcheck.yml vendored
@@ -1,38 +0,0 @@
name: linkcheck

on:
  push:
    branches: [master]
  pull_request:
    paths:
      - 'docs/**'

env:
  POETRY_VERSION: "1.4.2"

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.11"
    steps:
      - uses: actions/checkout@v3
      - name: Install poetry
        run: |
          pipx install poetry==$POETRY_VERSION
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: poetry
      - name: Install dependencies
        run: |
          poetry install --with docs
      - name: Build the docs
        run: |
          make docs_build
      - name: Analyzing the docs with linkcheck
        run: |
          make docs_linkcheck
36 .github/workflows/lint.yml vendored
@@ -1,36 +0,0 @@
name: lint

on:
  push:
    branches: [master]
  pull_request:

env:
  POETRY_VERSION: "1.4.2"

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    steps:
      - uses: actions/checkout@v3
      - name: Install poetry
        run: |
          pipx install poetry==$POETRY_VERSION
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: poetry
      - name: Install dependencies
        run: |
          poetry install
      - name: Analysing the code with our lint
        run: |
          make lint
49 .github/workflows/release.yml vendored
@@ -1,49 +0,0 @@
name: release

on:
  pull_request:
    types:
      - closed
    branches:
      - master
    paths:
      - 'pyproject.toml'

env:
  POETRY_VERSION: "1.4.2"

jobs:
  if_release:
    if: |
      ${{ github.event.pull_request.merged == true }}
      && ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install poetry
        run: pipx install poetry==$POETRY_VERSION
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
          cache: "poetry"
      - name: Build project for distribution
        run: poetry build
      - name: Check Version
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: true
          tag: v${{ steps.check-version.outputs.version }}
          commit: master
      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          poetry publish
86 .github/workflows/scheduled_test.yml vendored Normal file
@@ -0,0 +1,86 @@
name: Scheduled tests

on:
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI
  schedule:
    - cron: '0 13 * * *'

env:
  POETRY_VERSION: "1.6.1"

jobs:
  build:
    defaults:
      run:
        working-directory: libs/langchain
    runs-on: ubuntu-latest
    environment: Scheduled testing
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: Python ${{ matrix.python-version }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: libs/langchain
          cache-key: scheduled

      - name: 'Authenticate to Google Cloud'
        id: 'auth'
        uses: 'google-github-actions/auth@v1'
        with:
          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'

      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_REGION }}

      - name: Install dependencies
        working-directory: libs/langchain
        shell: bash
        run: |
          echo "Running scheduled tests, installing dependencies with poetry..."
          poetry install --with=test_integration
          poetry run pip install google-cloud-aiplatform
          poetry run pip install "boto3>=1.28.57"
          if [[ ${{ matrix.python-version }} != "3.8" ]]
          then
            poetry run pip install fireworks-ai
          fi

      - name: Run tests
        shell: bash
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
          AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
          AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
          AZURE_OPENAI_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_DEPLOYMENT_NAME }}
          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
        run: |
          make scheduled_tests

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
37 .github/workflows/templates_ci.yml vendored Normal file
@@ -0,0 +1,37 @@
---
name: templates CI

on:
  push:
    branches: [ master ]
  pull_request:
    paths:
      - '.github/actions/poetry_setup/action.yml'
      - '.github/tools/**'
      - '.github/workflows/_lint.yml'
      - '.github/workflows/templates_ci.yml'
      - 'templates/**'
  workflow_dispatch:  # Allows triggering the workflow manually in the GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: "templates"

jobs:
  lint:
    uses:
      ./.github/workflows/_lint.yml
    with:
      working-directory: templates
      langchain-location: ../libs/langchain
    secrets: inherit
49 .github/workflows/test.yml vendored
@@ -1,49 +0,0 @@
name: test

on:
  push:
    branches: [master]
  pull_request:
  workflow_dispatch:

env:
  POETRY_VERSION: "1.4.2"

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
        test_type:
          - "core"
          - "extended"
    name: Python ${{ matrix.python-version }} ${{ matrix.test_type }}
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: "1.4.2"
          cache-key: ${{ matrix.test_type }}
          install-command: |
            if [ "${{ matrix.test_type }}" == "core" ]; then
              echo "Running core tests, installing dependencies with poetry..."
              poetry install
            else
              echo "Running extended tests, installing dependencies with poetry..."
              poetry install -E extended_testing
            fi
      - name: Run ${{matrix.test_type}} tests
        run: |
          if [ "${{ matrix.test_type }}" == "core" ]; then
            make test
          else
            make extended_tests
          fi
        shell: bash
31 .gitignore vendored
@@ -30,6 +30,12 @@ share/python-wheels/
*.egg
MANIFEST

# Google GitHub Actions credentials files created by:
# https://github.com/google-github-actions/auth
#
# That action recommends adding this gitignore to prevent accidentally committing keys.
gha-creds-*.json

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
@@ -73,6 +79,7 @@ instance/

# Sphinx documentation
docs/_build/
docs/docs/_build/

# PyBuilder
target/
@@ -149,4 +156,26 @@ wandb/

# integration test artifacts
data_map*
\[('_type', 'fake'), ('stop', None)]

# Replit files
*replit*

node_modules
docs/.yarn/
docs/node_modules/
docs/.docusaurus/
docs/.cache-loader/
docs/_dist
docs/api_reference/api_reference.rst
docs/api_reference/experimental_api_reference.rst
docs/api_reference/_build
docs/api_reference/*/
!docs/api_reference/_static/
!docs/api_reference/templates/
!docs/api_reference/themes/
docs/docs/build
docs/docs/node_modules
docs/docs/yarn.lock
_dist
docs/docs/templates
.readthedocs.yaml
@@ -9,10 +9,18 @@ build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
  commands:
    - python -mvirtualenv $READTHEDOCS_VIRTUALENV_PATH
    - python -m pip install --upgrade --no-cache-dir pip setuptools
    - python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
    - python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
    - python docs/api_reference/create_api_rst.py
    - cat docs/api_reference/conf.py
    - python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference $READTHEDOCS_OUTPUT/html -j auto

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/conf.py
  configuration: docs/api_reference/conf.py

# If using Sphinx, optionally build your docs in additional formats such as PDF
# formats:
@@ -21,6 +29,4 @@ sphinx:
# Optionally declare the Python requirements required to build your docs
python:
  install:
    - requirements: docs/requirements.txt
    - method: pip
      path: .
    - requirements: docs/api_reference/requirements.txt
CITATION.cff
@@ -5,4 +5,4 @@ authors:
  given-names: "Harrison"
title: "LangChain"
date-released: 2022-10-17
url: "https://github.com/hwchase17/langchain"
url: "https://github.com/langchain-ai/langchain"
70 MIGRATE.md Normal file
@@ -0,0 +1,70 @@
# Migrating

## 🚨 Breaking Changes for select chains (SQLDatabase) on 7/28/23

In an effort to make `langchain` leaner and safer, we are moving select chains to `langchain_experimental`.
This migration has already started, but we are remaining backwards compatible until 7/28.
On that date, we will remove functionality from `langchain`.
Read more about the motivation and the progress [here](https://github.com/langchain-ai/langchain/discussions/8043).

### Migrating to `langchain_experimental`

We are moving any experimental components of LangChain, or components with vulnerability issues, into `langchain_experimental`.
This guide covers how to migrate.

### Installation

Previously:

`pip install -U langchain`

Now (only if you want to access things in experimental):

`pip install -U langchain langchain_experimental`

### Things in `langchain.experimental`

Previously:

`from langchain.experimental import ...`

Now:

`from langchain_experimental import ...`

### PALChain

Previously:

`from langchain.chains import PALChain`

Now:

`from langchain_experimental.pal_chain import PALChain`

### SQLDatabaseChain

Previously:

`from langchain.chains import SQLDatabaseChain`

Now:

`from langchain_experimental.sql import SQLDatabaseChain`

Alternatively, if you are just interested in using the query generation part of the SQL chain, you can check out [`create_sql_query_chain`](https://github.com/langchain-ai/langchain/blob/master/docs/extras/use_cases/tabular/sql_query.ipynb):

`from langchain.chains import create_sql_query_chain`

### `load_prompt` for Python files

Note: this only applies if you want to load Python files as prompts.
If you want to load json/yaml files, no change is needed.

Previously:

`from langchain.prompts import load_prompt`

Now:

`from langchain_experimental.prompts import load_prompt`
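Putting the pieces together, a migration is usually just the extra package install plus a changed import path. A minimal sketch for `SQLDatabaseChain` (the module and class names are exactly those listed above; the `llm` and `db` objects in the commented usage are assumed to exist):

```python
# Before (langchain prior to 7/28/23):
# from langchain.chains import SQLDatabaseChain

# After: requires `pip install -U langchain langchain_experimental`
from langchain_experimental.sql import SQLDatabaseChain

# Hypothetical usage, assuming an LLM and a SQLDatabase are already configured:
# chain = SQLDatabaseChain.from_llm(llm, db)
# chain.run("How many employees are there?")
```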
90 Makefile
@@ -1,70 +1,68 @@
.PHONY: all clean format lint test tests test_watch integration_tests docker_tests help extended_tests
.PHONY: all clean docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck

# Default target executed when no arguments are given to make.
all: help

coverage:
	poetry run pytest --cov \
		--cov-config=.coveragerc \
		--cov-report xml \
		--cov-report term-missing:skip-covered

clean: docs_clean
######################
# DOCUMENTATION
######################

clean: docs_clean api_docs_clean

docs_build:
	cd docs && poetry run make html
	docs/.local_build.sh

docs_clean:
	cd docs && poetry run make clean
	rm -r _dist

docs_linkcheck:
	poetry run linkchecker docs/_build/html/index.html
	poetry run linkchecker _dist/docs/ --ignore-url node_modules

format:
	poetry run black .
	poetry run ruff --select I --fix .
api_docs_build:
	poetry run python docs/api_reference/create_api_rst.py
	cd docs/api_reference && poetry run make html

PYTHON_FILES=.
lint: PYTHON_FILES=.
lint_diff: PYTHON_FILES=$(shell git diff --name-only --diff-filter=d master | grep -E '\.py$$')
api_docs_clean:
	rm -f docs/api_reference/api_reference.rst
	cd docs/api_reference && poetry run make clean

lint lint_diff:
	poetry run mypy $(PYTHON_FILES)
	poetry run black $(PYTHON_FILES) --check
	poetry run ruff .
api_docs_linkcheck:
	poetry run linkchecker docs/api_reference/_build/html/index.html

TEST_FILE ?= tests/unit_tests/
spell_check:
	poetry run codespell --toml pyproject.toml

test:
	poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
spell_fix:
	poetry run codespell --toml pyproject.toml -w

tests:
	poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
######################
# LINTING AND FORMATTING
######################

extended_tests:
	poetry run pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests
lint:
	poetry run ruff docs templates cookbook
	poetry run ruff format docs templates cookbook --diff

test_watch:
	poetry run ptw --now . -- tests/unit_tests
format format_diff:
	poetry run ruff format docs templates cookbook
	poetry run ruff --select I --fix docs templates cookbook

integration_tests:
	poetry run pytest tests/integration_tests

docker_tests:
	docker build -t my-langchain-image:test .
	docker run --rm my-langchain-image:test
######################
# HELP
######################

help:
	@echo '----'
	@echo 'coverage - run unit tests and generate coverage report'
	@echo '===================='
	@echo '-- DOCUMENTATION --'
	@echo 'clean - run docs_clean and api_docs_clean'
	@echo 'docs_build - build the documentation'
	@echo 'docs_clean - clean the documentation build artifacts'
	@echo 'docs_linkcheck - run linkchecker on the documentation'
	@echo 'format - run code formatters'
	@echo 'lint - run linters'
	@echo 'test - run unit tests'
	@echo 'tests - run unit tests'
	@echo 'test TEST_FILE=<test_file> - run all tests in file'
	@echo 'extended_tests - run only extended unit tests'
	@echo 'test_watch - run unit tests in watch mode'
	@echo 'integration_tests - run integration tests'
	@echo 'docker_tests - run unit tests in docker'
	@echo 'api_docs_build - build the API Reference documentation'
	@echo 'api_docs_clean - clean the API Reference documentation build artifacts'
	@echo 'api_docs_linkcheck - run linkchecker on the API Reference documentation'
	@echo 'spell_check - run codespell on the project'
	@echo 'spell_fix - run codespell on the project and fix the errors'
	@echo '-- TEST and LINT tasks are within libs/*/ per-package --'
105 README.md
@@ -2,73 +2,85 @@

⚡ Building applications with LLMs through composability ⚡

[](https://github.com/hwchase17/langchain/actions/workflows/lint.yml)
[](https://github.com/hwchase17/langchain/actions/workflows/test.yml)
[](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml)
[](https://github.com/langchain-ai/langchain/releases)
[](https://github.com/langchain-ai/langchain/actions/workflows/langchain_ci.yml)
[](https://github.com/langchain-ai/langchain/actions/workflows/langchain_experimental_ci.yml)
[](https://pepy.tech/project/langchain)
[](https://opensource.org/licenses/MIT)
[](https://twitter.com/langchainai)
[](https://discord.gg/6adMQxSpJS)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/hwchase17/langchain)
[](https://codespaces.new/hwchase17/langchain)
[](https://star-history.com/#hwchase17/langchain)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
[](https://codespaces.new/langchain-ai/langchain)
[](https://star-history.com/#langchain-ai/langchain)
[](https://libraries.io/github/langchain-ai/langchain)
[](https://github.com/langchain-ai/langchain/issues)

Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

Looking for the JS/TS version? Check out [LangChain.js](https://github.com/hwchase17/langchainjs).

**Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.
Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to get off the waitlist or speak with our sales team.

## Quick Install

`pip install langchain`
or
`conda install langchain -c conda-forge`
With pip:
```bash
pip install langchain
```

## 🤔 What is this?
With conda:
```bash
pip install langsmith && conda install langchain -c conda-forge
```

Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.
## 🤔 What is LangChain?

This library aims to assist in the development of those types of applications. Common examples of these applications include:
**LangChain** is a framework for developing applications powered by language models. It enables applications that:
- **Are context-aware**: connect a language model to sources of context (prompt instructions, few shot examples, content to ground its response in, etc.)
- **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)

**❓ Question Answering over specific documents**
This framework consists of several parts.
- **LangChain Libraries**: The Python and JavaScript libraries. Contains interfaces and integrations for a myriad of components, a basic runtime for combining these components into chains and agents, and off-the-shelf implementations of chains and agents.
- **[LangChain Templates](templates)**: A collection of easily deployable reference architectures for a wide variety of tasks.
- **[LangServe](https://github.com/langchain-ai/langserve)**: A library for deploying LangChain chains as a REST API.
- **[LangSmith](https://smith.langchain.com)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.

- [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/question_answering.html)
- End-to-end Example: [Question Answering over Notion Database](https://github.com/hwchase17/notion-qa)
**This repo contains the `langchain` ([here](libs/langchain)), `langchain-experimental` ([here](libs/experimental)), and `langchain-cli` ([here](libs/cli)) Python packages, as well as [LangChain Templates](templates).**

**💬 Chatbots**
![](docs/static/img/langchain_stack_dark.svg)

- [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/chatbots.html)
- End-to-end Example: [Chat-LangChain](https://github.com/hwchase17/chat-langchain)
## 🧱 What can you build with LangChain?
**❓ Retrieval augmented generation**

**🤖 Agents**
- [Documentation](https://python.langchain.com/docs/use_cases/question_answering/)
- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)

- [Documentation](https://langchain.readthedocs.io/en/latest/modules/agents.html)
- End-to-end Example: [GPT+WolframAlpha](https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain)
**💬 Analyzing structured data**

## 📖 Documentation
- [Documentation](https://python.langchain.com/docs/use_cases/qa_structured/sql)
- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain/tree/master/templates/sql-llama2)

Please see [here](https://langchain.readthedocs.io/en/latest/?) for full documentation on:
**🤖 Chatbots**

- Getting started (installation, setting up the environment, simple examples)
- How-To examples (demos, integrations, helper functions)
- Reference (full API docs)
- Resources (high-level explanation of core concepts)
- [Documentation](https://python.langchain.com/docs/use_cases/chatbots)
- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain)

## 🚀 What can this help with?
And much more! Head to the [Use cases](https://python.langchain.com/docs/use_cases/) section of the docs for more.

There are six main areas that LangChain is designed to help with.
These are, in increasing order of complexity:
## 🚀 How does LangChain help?
The main value props of the LangChain libraries are:
1. **Components**: composable tools and integrations for working with language models. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
2. **Off-the-shelf chains**: built-in assemblages of components for accomplishing higher-level tasks

**📃 LLMs and Prompts:**
Off-the-shelf chains make it easy to get started. Components make it easy to customize existing chains and build new ones.

Components fall into the following **modules**:

**📃 Model I/O:**

This includes prompt management, prompt optimization, a generic interface for all LLMs, and common utilities for working with LLMs.

**🔗 Chains:**

Chains go beyond a single LLM call and involve sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.

**📚 Data Augmented Generation:**
**📚 Retrieval:**

Data Augmented Generation involves specific types of chains that first interact with an external data source to fetch data for use in the generation step. Examples include summarization of long pieces of text and question/answering over specific data sources.

@@ -76,15 +88,16 @@ Data Augmented Generation involves specific types of chains that first interact

Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end-to-end agents.

**🧠 Memory:**
## 📖 Documentation

Memory refers to persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory.
Please see [here](https://python.langchain.com) for full documentation, which includes:

**🧐 Evaluation:**
- [Getting started](https://python.langchain.com/docs/get_started/introduction): installation, setting up the environment, simple examples
- Overview of the [interfaces](https://python.langchain.com/docs/expression_language/), [modules](https://python.langchain.com/docs/modules/) and [integrations](https://python.langchain.com/docs/integrations/providers)
- [Use case](https://python.langchain.com/docs/use_cases/qa_structured/sql) walkthroughs and best practice [guides](https://python.langchain.com/docs/guides/adapters/openai)
- [LangSmith](https://python.langchain.com/docs/langsmith/), [LangServe](https://python.langchain.com/docs/langserve), and [LangChain Template](https://python.langchain.com/docs/templates/) overviews
- [Reference](https://api.python.langchain.com): full API docs

[BETA] Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.

For more information on these concepts, please see our [full documentation](https://langchain.readthedocs.io/en/latest/).

## 💁 Contributing
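As a concrete illustration of "components composed into chains", here is a minimal sketch built only from pieces that appear elsewhere in this diff (`ChatOllama`, `ChatPromptTemplate`, `StrOutputParser`); it assumes a local Ollama server with the `llama2:13b-chat` model already pulled:

```python
from langchain.chat_models import ChatOllama
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

# Components: a prompt, a chat model, and an output parser...
prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a concise assistant."), ("human", "{question}")]
)
llm = ChatOllama(model="llama2:13b-chat")  # assumes a local Ollama server

# ...composed into a chain with the LCEL `|` operator.
chain = prompt | llm | StrOutputParser()
print(chain.invoke({"question": "What is LangChain?"}))
```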
6 SECURITY.md Normal file
@@ -0,0 +1,6 @@
# Security Policy

## Reporting a Vulnerability

Please report security vulnerabilities by email to `security@langchain.dev`.
This email is an alias to a subset of our maintainers, and will ensure the issue is promptly triaged and acted upon as needed.
397 cookbook/LLaMA2_sql_chat.ipynb Normal file
@@ -0,0 +1,397 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "fc935871-7640-41c6-b798-58514d860fe0",
   "metadata": {},
   "source": [
    "## LLaMA2 chat with SQL\n",
    "\n",
    "Open source, local LLMs are great to consider for any application that demands data privacy.\n",
    "\n",
    "SQL is one good example.\n",
    "\n",
    "This cookbook shows how to perform text-to-SQL using various versions of LLaMA2 run locally.\n",
    "\n",
    "## Packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "81adcf8b-395a-4f02-8749-ac976942b446",
   "metadata": {},
   "outputs": [],
   "source": [
    "! pip install langchain replicate"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8e13ed66-300b-4a23-b8ac-44df68ee4733",
   "metadata": {},
   "source": [
    "## LLM\n",
    "\n",
    "There are a few ways to access LLaMA2.\n",
    "\n",
    "To run locally, we use Ollama.ai.\n",
    "\n",
    "See [here](https://python.langchain.com/docs/integrations/chat/ollama) for details on installation and setup.\n",
    "\n",
    "Also, see [here](https://python.langchain.com/docs/guides/local_llms) for our full guide on local LLMs.\n",
    "\n",
    "To use an external API, which is not private, we can use Replicate."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6a75a5c6-34ee-4ab9-a664-d9b432d812ee",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Init param `input` is deprecated, please use `model_kwargs` instead.\n"
     ]
    }
   ],
   "source": [
    "# Local\n",
    "from langchain.chat_models import ChatOllama\n",
    "\n",
    "llama2_chat = ChatOllama(model=\"llama2:13b-chat\")\n",
    "llama2_code = ChatOllama(model=\"codellama:7b-instruct\")\n",
    "\n",
    "# API\n",
    "from langchain.llms import Replicate\n",
    "\n",
    "# REPLICATE_API_TOKEN = getpass()\n",
    "# os.environ[\"REPLICATE_API_TOKEN\"] = REPLICATE_API_TOKEN\n",
    "replicate_id = \"meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d\"\n",
    "llama2_chat_replicate = Replicate(\n",
    "    model=replicate_id, input={\"temperature\": 0.01, \"max_length\": 500, \"top_p\": 1}\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ce96f7ea-b3d5-44e1-9fa5-a79e04a9e1fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Simply set the LLM we want to use\n",
    "llm = llama2_chat"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "80222165-f353-4e35-a123-5f70fd70c6c8",
   "metadata": {},
   "source": [
    "## DB\n",
    "\n",
    "Connect to a SQLite DB.\n",
    "\n",
    "To create this particular DB, you can use the code and follow the steps shown [here](https://github.com/facebookresearch/llama-recipes/blob/main/demo_apps/StructuredLlama.ipynb)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "025bdd82-3bb1-4948-bc7c-c3ccd94fd05c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.utilities import SQLDatabase\n",
    "\n",
    "db = SQLDatabase.from_uri(\"sqlite:///nba_roster.db\", sample_rows_in_table_info=0)\n",
    "\n",
    "\n",
    "def get_schema(_):\n",
    "    return db.get_table_info()\n",
    "\n",
    "\n",
    "def run_query(query):\n",
    "    return db.run(query)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "654b3577-baa2-4e12-a393-f40e5db49ac7",
   "metadata": {},
   "source": [
    "## Query a SQL DB\n",
    "\n",
    "Follow the runnables workflow [here](https://python.langchain.com/docs/expression_language/cookbook/sql_db)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "5a4933ea-d9c0-4b0a-8177-ba4490c6532b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "' SELECT \"Team\" FROM nba_roster WHERE \"NAME\" = \\'Klay Thompson\\';'"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Prompt\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
    "{schema}\n",
    "\n",
    "Question: {question}\n",
    "SQL Query:\"\"\"\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\"system\", \"Given an input question, convert it to a SQL query. No pre-amble.\"),\n",
    "        (\"human\", template),\n",
    "    ]\n",
    ")\n",
    "\n",
    "# Chain to query\n",
    "from langchain.schema.output_parser import StrOutputParser\n",
    "from langchain.schema.runnable import RunnablePassthrough\n",
    "\n",
    "sql_response = (\n",
    "    RunnablePassthrough.assign(schema=get_schema)\n",
    "    | prompt\n",
    "    | llm.bind(stop=[\"\\nSQLResult:\"])\n",
    "    | StrOutputParser()\n",
    ")\n",
    "\n",
    "sql_response.invoke({\"question\": \"What team is Klay Thompson on?\"})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a0e9e2c8-9b88-4853-ac86-001bc6cc6695",
   "metadata": {},
   "source": [
    "We can review the results:\n",
    "\n",
    "* [LangSmith trace](https://smith.langchain.com/public/afa56a06-b4e2-469a-a60f-c1746e75e42b/r) LLaMA2-13 Replicate API\n",
    "* [LangSmith trace](https://smith.langchain.com/public/2d4ecc72-6b8f-4523-8f0b-ea95c6b54a1d/r) LLaMA2-13 local\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "2a2825e3-c1b6-4f7d-b9c9-d9835de323bb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=' Based on the table schema and SQL query, there are 30 unique teams in the NBA.')"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Chain to answer\n",
    "template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
    "{schema}\n",
    "\n",
    "Question: {question}\n",
    "SQL Query: {query}\n",
    "SQL Response: {response}\"\"\"\n",
    "prompt_response = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\n",
    "            \"system\",\n",
    "            \"Given an input question and SQL response, convert it to a natural language answer. No pre-amble.\",\n",
    "        ),\n",
    "        (\"human\", template),\n",
    "    ]\n",
    ")\n",
    "\n",
    "full_chain = (\n",
    "    RunnablePassthrough.assign(query=sql_response)\n",
    "    | RunnablePassthrough.assign(\n",
    "        schema=get_schema,\n",
    "        response=lambda x: db.run(x[\"query\"]),\n",
    "    )\n",
    "    | prompt_response\n",
    "    | llm\n",
    ")\n",
    "\n",
    "full_chain.invoke({\"question\": \"How many unique teams are there?\"})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ec17b3ee-6618-4681-b6df-089bbb5ffcd7",
   "metadata": {},
   "source": [
    "We can review the results:\n",
    "\n",
    "* [LangSmith trace](https://smith.langchain.com/public/10420721-746a-4806-8ecf-d6dc6399d739/r) LLaMA2-13 Replicate API\n",
    "* [LangSmith trace](https://smith.langchain.com/public/5265ebab-0a22-4f37-936b-3300f2dfa1c1/r) LLaMA2-13 local"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1e85381b-1edc-4bb3-a7bd-2ab23f81e54d",
   "metadata": {},
   "source": [
    "## Chat with a SQL DB\n",
    "\n",
    "Next, we can add memory."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "022868f2-128e-42f5-8d90-d3bb2f11d994",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "' SELECT \"Team\" FROM nba_roster WHERE \"NAME\" = \\'Klay Thompson\\';'"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Prompt\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
    "\n",
    "template = \"\"\"Given an input question, convert it to a SQL query. No pre-amble. Based on the table schema below, write a SQL query that would answer the user's question:\n",
    "{schema}\n",
    "\"\"\"\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\"system\", template),\n",
    "        MessagesPlaceholder(variable_name=\"history\"),\n",
    "        (\"human\", \"{question}\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "memory = ConversationBufferMemory(return_messages=True)\n",
    "\n",
    "# Chain to query with memory\n",
    "from langchain.schema.runnable import RunnableLambda\n",
    "\n",
    "sql_chain = (\n",
    "    RunnablePassthrough.assign(\n",
    "        schema=get_schema,\n",
    "        history=RunnableLambda(lambda x: memory.load_memory_variables(x)[\"history\"]),\n",
    "    )\n",
    "    | prompt\n",
    "    | llm.bind(stop=[\"\\nSQLResult:\"])\n",
    "    | StrOutputParser()\n",
    ")\n",
    "\n",
    "\n",
    "def save(input_output):\n",
    "    output = {\"output\": input_output.pop(\"output\")}\n",
    "    memory.save_context(input_output, output)\n",
    "    return output[\"output\"]\n",
    "\n",
    "\n",
    "sql_response_memory = RunnablePassthrough.assign(output=sql_chain) | save\n",
    "sql_response_memory.invoke({\"question\": \"What team is Klay Thompson on?\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "800a7a3b-f411-478b-af51-2310cd6e0425",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=' Sure! Here\\'s the natural language response based on the given input:\\n\\n\"Klay Thompson\\'s salary is $43,219,440.\"')"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Chain to answer\n",
    "template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
    "{schema}\n",
    "\n",
    "Question: {question}\n",
    "SQL Query: {query}\n",
    "SQL Response: {response}\"\"\"\n",
    "prompt_response = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\n",
    "            \"system\",\n",
    "            \"Given an input question and SQL response, convert it to a natural language answer. No pre-amble.\",\n",
    "        ),\n",
    "        (\"human\", template),\n",
    "    ]\n",
    ")\n",
    "\n",
    "full_chain = (\n",
    "    RunnablePassthrough.assign(query=sql_response_memory)\n",
    "    | RunnablePassthrough.assign(\n",
    "        schema=get_schema,\n",
    "        response=lambda x: db.run(x[\"query\"]),\n",
    "    )\n",
    "    | prompt_response\n",
    "    | llm\n",
    ")\n",
    "\n",
    "full_chain.invoke({\"question\": \"What is his salary?\"})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b77fee61-f4da-4bb1-8285-14101e505518",
   "metadata": {},
   "source": [
    "Here is the [trace](https://smith.langchain.com/public/54794d18-2337-4ce2-8b9f-3d8a2df89e51/r)."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
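One usage note on the notebook above: `llm` is bound once near the top, so switching to the Replicate-hosted model means rebinding the name and rebuilding any chain that captured the old object. A sketch, reusing only names defined in the notebook:

```python
# Swap the local Ollama model for the Replicate-hosted one defined above.
llm = llama2_chat_replicate

# Chains capture the model object at construction time, so rebuild them.
# `prompt` here is the schema+question prompt from the "Query a SQL DB" section.
sql_response = (
    RunnablePassthrough.assign(schema=get_schema)
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)
sql_response.invoke({"question": "What team is Klay Thompson on?"})
```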
826 cookbook/Multi_modal_RAG.ipynb Normal file
File diff suppressed because one or more lines are too long
57
cookbook/README.md
Normal file
57
cookbook/README.md
Normal file
@@ -0,0 +1,57 @@
# LangChain cookbook

Example code for building applications with LangChain, with an emphasis on more applied and end-to-end examples than those found in the [main documentation](https://python.langchain.com).

Notebook | Description
:- | :-
[LLaMA2_sql_chat.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/LLaMA2_sql_chat.ipynb) | Build a chat application that interacts with a SQL database using an open-source LLM (Llama 2), specifically demonstrated on an SQLite database containing rosters.
[Semi_Structured_RAG.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_Structured_RAG.ipynb) | Perform retrieval-augmented generation (RAG) on documents with semi-structured data, including text and tables, using Unstructured for parsing, the multi-vector retriever for storing, and LCEL for implementing chains.
[Semi_structured_and_multi_moda...](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb) | Perform retrieval-augmented generation (RAG) on documents with semi-structured data and images, using Unstructured for parsing, the multi-vector retriever for storage and retrieval, and LCEL for implementing chains.
[Semi_structured_multi_modal_RA...](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) | Perform retrieval-augmented generation (RAG) on documents with semi-structured data and images, using various tools and methods such as Unstructured for parsing, the multi-vector retriever for storing, LCEL for implementing chains, and open-source language models such as Llama 2, LLaVA, and GPT4All.
[analyze_document.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/analyze_document.ipynb) | Analyze a single long document.
[autogpt/autogpt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/autogpt/autogpt.ipynb) | Implement AutoGPT, an autonomous agent, with LangChain primitives such as LLMs, PromptTemplates, VectorStores, Embeddings, and Tools.
[autogpt/marathon_times.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/autogpt/marathon_times.ipynb) | Implement AutoGPT for finding winning marathon times.
[baby_agi.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/baby_agi.ipynb) | Implement BabyAGI, an AI agent that can generate and execute tasks based on a given objective, with the flexibility to swap out specific vectorstores/model providers.
[baby_agi_with_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/baby_agi_with_agent.ipynb) | Swap out the execution chain in the BabyAGI notebook with an agent that has access to tools, aiming to obtain more reliable information.
[camel_role_playing.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/camel_role_playing.ipynb) | Implement the CAMEL framework for creating autonomous cooperative agents in large-scale language models, using role-playing and inception prompting to guide chat agents toward task completion.
[causal_program_aided_language_...](https://github.com/langchain-ai/langchain/tree/master/cookbook/causal_program_aided_language_model.ipynb) | Implement the causal program-aided language (CPAL) chain, which improves upon the program-aided language (PAL) approach by incorporating causal structure to prevent hallucination in language models, particularly when dealing with complex narratives and math problems with nested dependencies.
[code-analysis-deeplake.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/code-analysis-deeplake.ipynb) | Analyze LangChain's own code base with the help of GPT and Activeloop's Deep Lake.
[custom_agent_with_plugin_retri...](https://github.com/langchain-ai/langchain/tree/master/cookbook/custom_agent_with_plugin_retrieval.ipynb) | Build a custom agent that can interact with AI plugins by retrieving tools and creating natural language wrappers around OpenAPI endpoints.
[custom_agent_with_plugin_retri...](https://github.com/langchain-ai/langchain/tree/master/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb) | Build a custom agent with plugin retrieval functionality, utilizing AI plugins from the `plugnplai` directory.
[databricks_sql_db.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/databricks_sql_db.ipynb) | Connect to Databricks runtimes and Databricks SQL.
[deeplake_semantic_search_over_...](https://github.com/langchain-ai/langchain/tree/master/cookbook/deeplake_semantic_search_over_chat.ipynb) | Perform semantic search and question answering over a group chat using Activeloop's Deep Lake with GPT-4.
[elasticsearch_db_qa.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/elasticsearch_db_qa.ipynb) | Interact with Elasticsearch analytics databases in natural language and build search queries via the Elasticsearch DSL API.
[extraction_openai_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/extraction_openai_tools.ipynb) | Perform structured data extraction with OpenAI tools.
[forward_looking_retrieval_augm...](https://github.com/langchain-ai/langchain/tree/master/cookbook/forward_looking_retrieval_augmented_generation.ipynb) | Implement the forward-looking active retrieval augmented generation (FLARE) method, which generates answers to questions, identifies uncertain tokens, generates hypothetical questions based on those tokens, and retrieves relevant documents to continue generating the answer.
[generative_agents_interactive_...](https://github.com/langchain-ai/langchain/tree/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) | Implement a generative agent that simulates human behavior, based on a research paper, using a time-weighted memory object backed by a LangChain retriever.
[gymnasium_agent_simulation.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/gymnasium_agent_simulation.ipynb) | Create a simple agent-environment interaction loop in simulated environments like text-based games with Gymnasium.
[hugginggpt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/hugginggpt.ipynb) | Implement HuggingGPT, a system that connects language models like ChatGPT with the machine learning community via Hugging Face.
[hypothetical_document_embeddin...](https://github.com/langchain-ai/langchain/tree/master/cookbook/hypothetical_document_embeddings.ipynb) | Improve document indexing with hypothetical document embeddings (HyDE), an embedding technique that generates and embeds hypothetical answers to queries.
[learned_prompt_optimization.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/learned_prompt_optimization.ipynb) | Automatically enhance language model prompts by injecting specific terms using reinforcement learning, which can be used to personalize responses based on user preferences.
[llm_bash.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_bash.ipynb) | Perform simple filesystem commands using large language models (LLMs) and a bash process.
[llm_checker.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_checker.ipynb) | Create a self-checking chain using LLMCheckerChain.
[llm_math.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_math.ipynb) | Solve complex word math problems using language models and Python REPLs.
[llm_summarization_checker.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_summarization_checker.ipynb) | Check the accuracy of text summaries, with the option to run the checker multiple times for improved results.
[llm_symbolic_math.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_symbolic_math.ipynb) | Solve algebraic equations with the help of large language models (LLMs) and SymPy, a Python library for symbolic mathematics.
[meta_prompt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/meta_prompt.ipynb) | Implement the meta-prompt concept, a method for building self-improving agents that reflect on their own performance and modify their instructions accordingly.
[multi_modal_output_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_modal_output_agent.ipynb) | Generate multi-modal outputs, specifically images and text.
[multi_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_player_dnd.ipynb) | Simulate multi-player Dungeons & Dragons games, with a custom function determining the speaking schedule of the agents.
[multiagent_authoritarian.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multiagent_authoritarian.ipynb) | Implement a multi-agent simulation where a privileged agent controls the conversation, including deciding who speaks and when the conversation ends, in the context of a simulated news network.
[multiagent_bidding.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multiagent_bidding.ipynb) | Implement a multi-agent simulation where agents bid to speak, with the highest bidder speaking next, demonstrated through a fictitious presidential debate example.
[myscale_vector_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/myscale_vector_sql.ipynb) | Access and interact with the MyScale integrated vector database, which can enhance the performance of language model (LLM) applications.
[openai_functions_retrieval_qa....](https://github.com/langchain-ai/langchain/tree/master/cookbook/openai_functions_retrieval_qa.ipynb) | Structure response output in a question-answering system by incorporating OpenAI functions into a retrieval pipeline.
[openai_v1_cookbook.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/openai_v1_cookbook.ipynb) | Explore new functionality released alongside the V1 release of the OpenAI Python library.
[petting_zoo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/petting_zoo.ipynb) | Create multi-agent simulations with simulated environments using the PettingZoo library.
[plan_and_execute_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/plan_and_execute_agent.ipynb) | Create plan-and-execute agents that accomplish objectives by planning tasks with a language model (LLM) and executing them with a separate agent.
[press_releases.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/press_releases.ipynb) | Retrieve and query company press release data powered by [Kay.ai](https://kay.ai).
[program_aided_language_model.i...](https://github.com/langchain-ai/langchain/tree/master/cookbook/program_aided_language_model.ipynb) | Implement program-aided language models as described in the provided research paper.
[qa_citations.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/qa_citations.ipynb) | Explore different ways to get a model to cite its sources.
[retrieval_in_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/retrieval_in_sql.ipynb) | Perform retrieval-augmented generation (RAG) on a PostgreSQL database using pgvector.
[sales_agent_with_context.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/sales_agent_with_context.ipynb) | Implement a context-aware AI sales agent, SalesGPT, that can have natural sales conversations, interact with other systems, and use a product knowledge base to discuss a company's offerings.
[self_query_hotel_search.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/self_query_hotel_search.ipynb) | Build a hotel room search feature with self-querying retrieval, using a specific hotel recommendation dataset.
[smart_llm.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/smart_llm.ipynb) | Implement a SmartLLMChain, a self-critique chain that generates multiple output proposals, critiques them to find the best one, and then improves upon it to produce a final output.
[tree_of_thought.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/tree_of_thought.ipynb) | Query a large language model using the Tree of Thought technique.
[twitter-the-algorithm-analysis...](https://github.com/langchain-ai/langchain/tree/master/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb) | Analyze the source code of the Twitter algorithm with the help of GPT-4 and Activeloop's Deep Lake.
[two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
[two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player Dungeons & Dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
[wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple Wikibase agent that utilizes SPARQL generation, with testing done on http://wikidata.org.
455
cookbook/Semi_Structured_RAG.ipynb
Normal file
File diff suppressed because one or more lines are too long
746
cookbook/Semi_structured_and_multi_modal_RAG.ipynb
Normal file
File diff suppressed because one or more lines are too long
642
cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb
Normal file
File diff suppressed because one or more lines are too long
833
cookbook/advanced_rag_eval.ipynb
Normal file
File diff suppressed because one or more lines are too long
105
cookbook/analyze_document.ipynb
Normal file
@@ -0,0 +1,105 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "f69d4a4c-137d-47e9-bea1-786afce9c1c0",
"metadata": {},
"source": [
"# Analyze a single long document\n",
"\n",
"The AnalyzeDocumentChain takes in a single document, splits it up, and then runs it through a CombineDocumentsChain."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2a0707ce-6d2d-471b-bc33-64da32a7b3f0",
"metadata": {},
"outputs": [],
"source": [
"with open(\"../docs/docs/modules/state_of_the_union.txt\") as f:\n",
"    state_of_the_union = f.read()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "ca14d161-2d5b-4a6c-a296-77d8ce4b28cd",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import AnalyzeDocumentChain\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "9f97406c-85a9-45fb-99ce-9138c0ba3731",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"qa_chain = load_qa_chain(llm, chain_type=\"map_reduce\")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "0871a753-f5bb-4b4f-a394-f87f2691f659",
"metadata": {},
"outputs": [],
"source": [
"qa_document_chain = AnalyzeDocumentChain(combine_docs_chain=qa_chain)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "e6f86428-3c2c-46a0-a57c-e22826fdbf91",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'The President said, \"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\"'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"qa_document_chain.run(\n",
"    input_document=state_of_the_union,\n",
"    question=\"what did the president say about justice breyer?\",\n",
")"
]
},
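{
"cell_type": "markdown",
"id": "a0e1f2d3-editorial-note",
"metadata": {},
"source": [
"The same pattern works with other combine-documents chains. Below is a minimal sketch (an editorial addition, not part of the original notebook) that swaps the QA chain for a summarization chain, assuming `load_summarize_chain` from `langchain.chains.summarize` is available in this version of LangChain:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b1f2e3d4-editorial-code",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: summarize the whole document instead of answering a question.\n",
"# Assumes the `llm` and `state_of_the_union` objects defined above.\n",
"from langchain.chains.summarize import load_summarize_chain\n",
"\n",
"summarize_chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n",
"summarize_document_chain = AnalyzeDocumentChain(combine_docs_chain=summarize_chain)\n",
"summarize_document_chain.run(state_of_the_union)"
]
}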
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
212
cookbook/autogpt/autogpt.ipynb
Normal file
@@ -0,0 +1,212 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "14f8b67b",
"metadata": {},
"source": [
"# AutoGPT\n",
"\n",
"Implementation of https://github.com/Significant-Gravitas/Auto-GPT but with LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)"
]
},
{
"cell_type": "markdown",
"id": "192496a7",
"metadata": {},
"source": [
"## Set up tools\n",
"\n",
"We'll set up an AutoGPT with a search tool, a write-file tool, and a read-file tool"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "7c2c9b54",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool\n",
"from langchain.tools.file_management.read import ReadFileTool\n",
"from langchain.tools.file_management.write import WriteFileTool\n",
"from langchain.utilities import SerpAPIWrapper\n",
"\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name=\"search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events. You should ask targeted questions\",\n",
"    ),\n",
"    WriteFileTool(),\n",
"    ReadFileTool(),\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "8e39ee28",
"metadata": {},
"source": [
"## Set up memory\n",
"\n",
"The memory here is used for the agent's intermediate steps"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "72bc204d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.vectorstores import FAISS"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "1df7b724",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "markdown",
"id": "e40fd657",
"metadata": {},
"source": [
"## Set up the model and AutoGPT\n",
"\n",
"Initialize everything! We will use the ChatOpenAI model"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "3393bc23",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain_experimental.autonomous_agents import AutoGPT"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "709c08c2",
"metadata": {},
"outputs": [],
"source": [
"agent = AutoGPT.from_llm_and_tools(\n",
"    ai_name=\"Tom\",\n",
"    ai_role=\"Assistant\",\n",
"    tools=tools,\n",
"    llm=ChatOpenAI(temperature=0),\n",
"    memory=vectorstore.as_retriever(),\n",
")\n",
"# Set verbose to be true\n",
"agent.chain.verbose = True"
]
},
{
"cell_type": "markdown",
"id": "f0f208d9",
"metadata": {
"collapsed": false
},
"source": [
"## Run an example\n",
"\n",
"Here we will make it write a weather report for SF"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d119d788",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"agent.run([\"write a weather report for SF today\"])"
]
},
{
"cell_type": "markdown",
"id": "f13f8322",
"metadata": {
"collapsed": false
},
"source": [
"## Chat History Memory\n",
"\n",
"In addition to the memory that holds the agent's intermediate steps, we also have a chat history memory. By default, the agent uses `ChatMessageHistory`, and it can be swapped out. This is useful when you want to use a different type of memory, for example `FileChatMessageHistory`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2a81f5ad",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from langchain.memory.chat_message_histories import FileChatMessageHistory\n",
"\n",
"agent = AutoGPT.from_llm_and_tools(\n",
"    ai_name=\"Tom\",\n",
"    ai_role=\"Assistant\",\n",
"    tools=tools,\n",
"    llm=ChatOpenAI(temperature=0),\n",
"    memory=vectorstore.as_retriever(),\n",
"    chat_history_memory=FileChatMessageHistory(\"chat_history.txt\"),\n",
")"
]
},
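{
"cell_type": "markdown",
"id": "ed17073a-editorial-note",
"metadata": {},
"source": [
"A small, hedged addition (not in the original notebook): because `FileChatMessageHistory` persists messages to disk, the accumulated history can be inspected, or cleared for a fresh run, between sessions."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd28084b-editorial-code",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: inspect the file-backed chat history, then wipe it.\n",
"history = FileChatMessageHistory(\"chat_history.txt\")\n",
"print(history.messages)  # the BaseMessage objects persisted so far\n",
"history.clear()"
]
},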
{
"cell_type": "markdown",
"id": "b1403008",
"metadata": {
"collapsed": false
},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
cookbook/autogpt/marathon_times.ipynb
@@ -33,17 +33,16 @@
},
"outputs": [],
"source": [
"# General \n",
"import os\n",
"import pandas as pd\n",
"from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
"from langchain.docstore.document import Document\n",
"# General\n",
"import asyncio\n",
"import os\n",
"\n",
"import nest_asyncio\n",
"import pandas as pd\n",
"from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.docstore.document import Document\n",
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"\n",
"# Needed since Jupyter runs an async event loop\n",
"nest_asyncio.apply()"
@@ -92,12 +91,14 @@
"import os\n",
"from contextlib import contextmanager\n",
"from typing import Optional\n",
"\n",
"from langchain.agents import tool\n",
"from langchain.tools.file_management.read import ReadFileTool\n",
"from langchain.tools.file_management.write import WriteFileTool\n",
"\n",
"ROOT_DIR = \"./data/\"\n",
"\n",
"\n",
"@contextmanager\n",
"def pushd(new_dir):\n",
"    \"\"\"Context manager for changing the current working directory.\"\"\"\n",
@@ -108,6 +109,7 @@
"    finally:\n",
"        os.chdir(prev_dir)\n",
"\n",
"\n",
"@tool\n",
"def process_csv(\n",
"    csv_file_path: str, instructions: str, output_path: Optional[str] = None\n",
@@ -190,10 +192,12 @@
"        await browser.close()\n",
"    return results\n",
"\n",
"\n",
"def run_async(coro):\n",
"    event_loop = asyncio.get_event_loop()\n",
"    return event_loop.run_until_complete(coro)\n",
"\n",
"\n",
"@tool\n",
"def browse_web_page(url: str) -> str:\n",
"    \"\"\"Verbose way to scrape a whole webpage. Likely to cause issues parsing.\"\"\"\n",
@@ -219,27 +223,34 @@
},
"outputs": [],
"source": [
"from langchain.tools import BaseTool, DuckDuckGoSearchRun\n",
"from langchain.chains.qa_with_sources.loading import (\n",
"    BaseCombineDocumentsChain,\n",
"    load_qa_with_sources_chain,\n",
")\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"from langchain.tools import BaseTool, DuckDuckGoSearchRun\n",
"from pydantic import Field\n",
"from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain\n",
"\n",
"\n",
"def _get_text_splitter():\n",
"    return RecursiveCharacterTextSplitter(\n",
"        # Set a really small chunk size, just to show.\n",
"        chunk_size = 500,\n",
"        chunk_overlap = 20,\n",
"        length_function = len,\n",
"        chunk_size=500,\n",
"        chunk_overlap=20,\n",
"        length_function=len,\n",
"    )\n",
"\n",
"\n",
"class WebpageQATool(BaseTool):\n",
"    name = \"query_webpage\"\n",
"    description = \"Browse a webpage and retrieve the information relevant to the question.\"\n",
"    text_splitter: RecursiveCharacterTextSplitter = Field(default_factory=_get_text_splitter)\n",
"    description = (\n",
"        \"Browse a webpage and retrieve the information relevant to the question.\"\n",
"    )\n",
"    text_splitter: RecursiveCharacterTextSplitter = Field(\n",
"        default_factory=_get_text_splitter\n",
"    )\n",
"    qa_chain: BaseCombineDocumentsChain\n",
"    \n",
"\n",
"    def _run(self, url: str, question: str) -> str:\n",
"        \"\"\"Useful for browsing websites and scraping the text information.\"\"\"\n",
"        result = browse_web_page.run(url)\n",
@@ -248,15 +259,22 @@
"        results = []\n",
"        # TODO: Handle this with a MapReduceChain\n",
"        for i in range(0, len(web_docs), 4):\n",
"            input_docs = web_docs[i:i+4]\n",
"            window_result = self.qa_chain({\"input_documents\": input_docs, \"question\": question}, return_only_outputs=True)\n",
"            input_docs = web_docs[i : i + 4]\n",
"            window_result = self.qa_chain(\n",
"                {\"input_documents\": input_docs, \"question\": question},\n",
"                return_only_outputs=True,\n",
"            )\n",
"            results.append(f\"Response from window {i} - {window_result}\")\n",
"        results_docs = [Document(page_content=\"\\n\".join(results), metadata={\"source\": url})]\n",
"        return self.qa_chain({\"input_documents\": results_docs, \"question\": question}, return_only_outputs=True)\n",
"        \n",
"        results_docs = [\n",
"            Document(page_content=\"\\n\".join(results), metadata={\"source\": url})\n",
"        ]\n",
"        return self.qa_chain(\n",
"            {\"input_documents\": results_docs, \"question\": question},\n",
"            return_only_outputs=True,\n",
"        )\n",
"\n",
"    async def _arun(self, url: str, question: str) -> str:\n",
"        raise NotImplementedError\n",
"        "
"        raise NotImplementedError"
]
},
{
@@ -292,10 +310,9 @@
"source": [
"# Memory\n",
"import faiss\n",
"from langchain.vectorstores import FAISS\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.tools.human.tool import HumanInputRun\n",
"from langchain.vectorstores import FAISS\n",
"\n",
"embeddings_model = OpenAIEmbeddings()\n",
"embedding_size = 1536\n",
@@ -590,7 +607,11 @@
}
],
"source": [
"agent.run([\"What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\"])"
"agent.run(\n",
"    [\n",
"        \"What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\"\n",
"    ]\n",
")"
]
},
{
251
cookbook/baby_agi.ipynb
Normal file
@@ -0,0 +1,251 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "517a9fd4",
"metadata": {},
"source": [
"# BabyAGI User Guide\n",
"\n",
"This notebook demonstrates how to implement [BabyAGI](https://github.com/yoheinakajima/babyagi/tree/main) by [Yohei Nakajima](https://twitter.com/yoheinakajima). BabyAGI is an AI agent that can generate and pretend to execute tasks based on a given objective.\n",
"\n",
"This guide will help you understand the components to create your own recursive agents.\n",
"\n",
"Although BabyAGI uses specific vectorstores/model providers (Pinecone, OpenAI), one of the benefits of implementing it with LangChain is that you can easily swap those out for different options. In this implementation we use a FAISS vectorstore (because it runs locally and is free)."
]
},
{
"cell_type": "markdown",
"id": "556af556",
"metadata": {},
"source": [
"## Install and Import Required Modules"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c8a354b6",
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.llms import OpenAI\n",
"from langchain_experimental.autonomous_agents import BabyAGI"
]
},
{
"cell_type": "markdown",
"id": "09f70772",
"metadata": {},
"source": [
"## Connect to the Vector Store\n",
"\n",
"Depending on what vectorstore you use, this step may look different."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "794045d4",
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.vectorstores import FAISS"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6e0305eb",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
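{
"cell_type": "markdown",
"id": "vectorstore-alt-editorial-note",
"metadata": {},
"source": [
"As an illustrative alternative (an editorial sketch, not part of the original notebook, and assuming the `chromadb` package is installed), the same step with Chroma avoids managing a raw FAISS index by hand:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "vectorstore-alt-editorial-code",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: an equivalent empty vectorstore backed by Chroma instead of FAISS.\n",
"# The collection name \"babyagi\" is an arbitrary choice for this example.\n",
"from langchain.vectorstores import Chroma\n",
"\n",
"chroma_vectorstore = Chroma(\n",
"    collection_name=\"babyagi\", embedding_function=embeddings_model\n",
")"
]
},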
{
"cell_type": "markdown",
"id": "05ba762e",
"metadata": {},
"source": [
"### Run the BabyAGI\n",
"\n",
"Now it's time to create the BabyAGI controller and watch it try to accomplish your objective."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3d220b69",
"metadata": {},
"outputs": [],
"source": [
"OBJECTIVE = \"Write a weather report for SF today\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "8a8e5543",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "3d69899b",
"metadata": {},
"outputs": [],
"source": [
"# Logging of LLMChains\n",
"verbose = False\n",
"# If None, will keep on going forever\n",
"max_iterations: Optional[int] = 3\n",
"baby_agi = BabyAGI.from_llm(\n",
"    llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f7957b51",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"\n",
"\n",
"1. Check the weather forecast for San Francisco today\n",
"2. Make note of the temperature, humidity, wind speed, and other relevant weather conditions\n",
"3. Write a weather report summarizing the forecast\n",
"4. Check for any weather alerts or warnings\n",
"5. Share the report with the relevant stakeholders\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"2: Check the current temperature in San Francisco\n",
"3: Check the current humidity in San Francisco\n",
"4: Check the current wind speed in San Francisco\n",
"5: Check for any weather alerts or warnings in San Francisco\n",
"6: Check the forecast for the next 24 hours in San Francisco\n",
"7: Check the forecast for the next 48 hours in San Francisco\n",
"8: Check the forecast for the next 72 hours in San Francisco\n",
"9: Check the forecast for the next week in San Francisco\n",
"10: Check the forecast for the next month in San Francisco\n",
"11: Check the forecast for the next 3 months in San Francisco\n",
"1: Write a weather report for SF today\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"2: Check the current temperature in San Francisco\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"\n",
"\n",
"I will check the current temperature in San Francisco. I will use an online weather service to get the most up-to-date information.\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"3: Check the current UV index in San Francisco.\n",
"4: Check the current air quality in San Francisco.\n",
"5: Check the current precipitation levels in San Francisco.\n",
"6: Check the current cloud cover in San Francisco.\n",
"7: Check the current barometric pressure in San Francisco.\n",
"8: Check the current dew point in San Francisco.\n",
"9: Check the current wind direction in San Francisco.\n",
"10: Check the current humidity levels in San Francisco.\n",
"1: Check the current temperature in San Francisco to the average temperature for this time of year.\n",
"2: Check the current visibility in San Francisco.\n",
"11: Write a weather report for SF today.\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"3: Check the current UV index in San Francisco.\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"\n",
"\n",
"The current UV index in San Francisco is moderate. The UV index is expected to remain at moderate levels throughout the day. It is recommended to wear sunscreen and protective clothing when outdoors.\n",
"\u001b[91m\u001b[1m\n",
"*****TASK ENDING*****\n",
"\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'objective': 'Write a weather report for SF today'}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"baby_agi({\"objective\": OBJECTIVE})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "898a210b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
389
cookbook/baby_agi_with_agent.ipynb
Normal file
@@ -0,0 +1,389 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "517a9fd4",
"metadata": {},
"source": [
"# BabyAGI with Tools\n",
"\n",
"This notebook builds on top of [baby agi](baby_agi.html), but shows how you can swap out the execution chain. The previous execution chain was just an LLM which made stuff up. By swapping it out with an agent that has access to tools, we can hopefully get more reliable information"
]
},
{
"cell_type": "markdown",
"id": "556af556",
"metadata": {},
"source": [
"## Install and Import Required Modules"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c8a354b6",
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.llms import OpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_experimental.autonomous_agents import BabyAGI"
]
},
{
"cell_type": "markdown",
"id": "09f70772",
"metadata": {},
"source": [
"## Connect to the Vector Store\n",
"\n",
"Depending on what vectorstore you use, this step may look different."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "794045d4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install faiss-cpu > /dev/null\n",
"%pip install google-search-results > /dev/null\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.vectorstores import FAISS"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6e0305eb",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "markdown",
"id": "0f3b72bf",
"metadata": {},
"source": [
"## Define the Chains\n",
"\n",
"BabyAGI relies on three LLM chains:\n",
"- Task creation chain to select new tasks to add to the list\n",
"- Task prioritization chain to re-prioritize tasks\n",
"- Execution chain to execute the tasks\n",
"\n",
"\n",
"NOTE: in this notebook, the Execution chain will now be an agent."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "b43cd580",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain.chains import LLMChain\n",
"from langchain.llms import OpenAI\n",
"from langchain.utilities import SerpAPIWrapper\n",
"\n",
"todo_prompt = PromptTemplate.from_template(\n",
"    \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n",
")\n",
"todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"TODO\",\n",
"        func=todo_chain.run,\n",
"        description=\"useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!\",\n",
"    ),\n",
"]\n",
"\n",
"\n",
"prefix = \"\"\"You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\"\"\"\n",
"suffix = \"\"\"Question: {task}\n",
"{agent_scratchpad}\"\"\"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
"    tools,\n",
"    prefix=prefix,\n",
"    suffix=suffix,\n",
"    input_variables=[\"objective\", \"task\", \"context\", \"agent_scratchpad\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "4b00ae2e",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"tool_names = [tool.name for tool in tools]\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)\n",
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
"cell_type": "markdown",
"id": "05ba762e",
"metadata": {},
"source": [
"### Run the BabyAGI\n",
"\n",
"Now it's time to create the BabyAGI controller and watch it try to accomplish your objective."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "3d220b69",
"metadata": {},
"outputs": [],
"source": [
"OBJECTIVE = \"Write a weather report for SF today\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "3d69899b",
"metadata": {},
"outputs": [],
"source": [
"# Logging of LLMChains\n",
"verbose = False\n",
"# If None, will keep on going forever\n",
"max_iterations: Optional[int] = 3\n",
"baby_agi = BabyAGI.from_llm(\n",
"    llm=llm,\n",
"    vectorstore=vectorstore,\n",
"    task_execution_chain=agent_executor,\n",
"    verbose=verbose,\n",
"    max_iterations=max_iterations,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "f7957b51",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to come up with a todo list\n",
"Action: TODO\n",
"Action Input: Write a weather report for SF today\u001b[0m\u001b[33;1m\u001b[1;3m\n",
"\n",
"1. Research current weather conditions in San Francisco\n",
"2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions\n",
"3. Analyze data to determine current weather trends\n",
"4. Write a brief introduction to the weather report\n",
"5. Describe current weather conditions in San Francisco\n",
"6. Discuss any upcoming weather changes\n",
"7. Summarize the weather report\n",
"8. Proofread and edit the report\n",
"9. Submit the report\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. Proofread and edit the report; 9. Submit the report.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. Proofread and edit the report; 9. Submit the report.\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n",
"3: Analyze data to determine any upcoming weather changes;\n",
"4: Research current weather forecasts for San Francisco;\n",
"5: Create a visual representation of the weather report;\n",
"6: Include relevant images and graphics in the report;\n",
"7: Format the report for readability;\n",
"8: Publish the report online;\n",
"9: Monitor the report for accuracy.\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to search for current weather conditions in San Francisco\n",
"Action: Search\n",
"Action Input: Current weather conditions in San Francisco\u001b[0m\u001b[36;1m\u001b[1;3mCurrent Weather for Popular Cities ; San Francisco, CA 46 · Partly Cloudy ; Manhattan, NY warning 52 · Cloudy ; Schiller Park, IL (60176) 40 · Sunny ; Boston, MA 54 ...\u001b[0m\u001b[32;1m\u001b[1;3m I need to compile the data into a weather report\n",
"Action: TODO\n",
"Action Input: Compile data into a weather report\u001b[0m\u001b[33;1m\u001b[1;3m\n",
"\n",
"1. Gather data from reliable sources such as the National Weather Service, local weather stations, and other meteorological organizations.\n",
"\n",
"2. Analyze the data to identify trends and patterns.\n",
"\n",
"3. Create a chart or graph to visualize the data.\n",
"\n",
"4. Write a summary of the data and its implications.\n",
"\n",
"5. Compile the data into a report format.\n",
"\n",
"6. Proofread the report for accuracy and clarity.\n",
"\n",
"7. Publish the report to a website or other platform.\n",
"\n",
"8. Distribute the report to relevant stakeholders.\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. The forecast for the rest of the day is expected to remain partly cloudy.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. The forecast for the rest of the day is expected to remain partly cloudy.\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"3: Format the report for readability;\n",
"4: Include relevant images and graphics in the report;\n",
"5: Compare the current weather conditions in San Francisco to the forecasted conditions;\n",
"6: Identify any potential weather-related hazards in the area;\n",
"7: Research historical weather patterns in San Francisco;\n",
"8: Identify any potential trends in the weather data;\n",
"9: Include relevant data sources in the report;\n",
"10: Summarize the weather report in a concise manner;\n",
"11: Include a summary of the forecasted weather conditions;\n",
"12: Include a summary of the current weather conditions;\n",
"13: Include a summary of the historical weather patterns;\n",
"14: Include a summary of the potential weather-related hazards;\n",
"15: Include a summary of the potential trends in the weather data;\n",
"16: Include a summary of the data sources used in the report;\n",
"17: Analyze data to determine any upcoming weather changes;\n",
"18: Research current weather forecasts for San Francisco;\n",
"19: Create a visual representation of the weather report;\n",
"20: Publish the report online;\n",
"21: Monitor the report for accuracy\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"3: Format the report for readability;\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to make sure the report is easy to read;\n",
"Action: TODO\n",
"Action Input: Make the report easy to read\u001b[0m\u001b[33;1m\u001b[1;3m\n",
"\n",
"1. Break up the report into sections with clear headings\n",
"2. Use bullet points and numbered lists to organize information\n",
"3. Use short, concise sentences\n",
"4. Use simple language and avoid jargon\n",
"5. Include visuals such as charts, graphs, and diagrams to illustrate points\n",
"6. Use bold and italicized text to emphasize key points\n",
"7. Include a table of contents and page numbers\n",
"8. Use a consistent font and font size throughout the report\n",
"9. Include a summary at the end of the report\n",
"10. Proofread the report for typos and errors\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\n",
"\u001b[91m\u001b[1m\n",
"*****TASK ENDING*****\n",
"\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'objective': 'Write a weather report for SF today'}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"baby_agi({\"objective\": OBJECTIVE})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "898a210b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
708
cookbook/camel_role_playing.ipynb
Normal file
@@ -0,0 +1,708 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# CAMEL Role-Playing Autonomous Cooperative Agents\n",
|
||||
"\n",
|
||||
"This is a langchain implementation of paper: \"CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society\".\n",
|
||||
"\n",
|
||||
"Overview:\n",
|
||||
"\n",
|
||||
"The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond.\n",
|
||||
"\n",
|
||||
"The original implementation: https://github.com/lightaime/camel\n",
|
||||
"\n",
|
||||
"Project website: https://www.camel-ai.org/\n",
|
||||
"\n",
|
||||
"Arxiv paper: https://arxiv.org/abs/2303.17760\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import LangChain related modules "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.prompts.chat import (\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define a CAMEL agent helper class"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CAMELAgent:\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" system_message: SystemMessage,\n",
|
||||
" model: ChatOpenAI,\n",
|
||||
" ) -> None:\n",
|
||||
" self.system_message = system_message\n",
|
||||
" self.model = model\n",
|
||||
" self.init_messages()\n",
|
||||
"\n",
|
||||
" def reset(self) -> None:\n",
|
||||
" self.init_messages()\n",
|
||||
" return self.stored_messages\n",
|
||||
"\n",
|
||||
" def init_messages(self) -> None:\n",
|
||||
" self.stored_messages = [self.system_message]\n",
|
||||
"\n",
|
||||
" def update_messages(self, message: BaseMessage) -> List[BaseMessage]:\n",
|
||||
" self.stored_messages.append(message)\n",
|
||||
" return self.stored_messages\n",
|
||||
"\n",
|
||||
" def step(\n",
|
||||
" self,\n",
|
||||
" input_message: HumanMessage,\n",
|
||||
" ) -> AIMessage:\n",
|
||||
" messages = self.update_messages(input_message)\n",
|
||||
"\n",
|
||||
" output_message = self.model(messages)\n",
|
||||
" self.update_messages(output_message)\n",
|
||||
"\n",
|
||||
" return output_message"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
"## Set up the OpenAI API key, roles, and task for role-playing"
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
|
||||
"\n",
|
||||
"assistant_role_name = \"Python Programmer\"\n",
|
||||
"user_role_name = \"Stock Trader\"\n",
|
||||
"task = \"Develop a trading bot for the stock market\"\n",
|
||||
"word_limit = 50 # word limit for task brainstorming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
"## Create a task-specifier agent for brainstorming and get the specified task"
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Specified task: Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"task_specifier_sys_msg = SystemMessage(content=\"You can make a task more specific.\")\n",
|
||||
"task_specifier_prompt = \"\"\"Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.\n",
|
||||
"Please make it more specific. Be creative and imaginative.\n",
|
||||
"Please reply with the specified task in {word_limit} words or less. Do not add anything else.\"\"\"\n",
|
||||
"task_specifier_template = HumanMessagePromptTemplate.from_template(\n",
|
||||
" template=task_specifier_prompt\n",
|
||||
")\n",
|
||||
"task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))\n",
|
||||
"task_specifier_msg = task_specifier_template.format_messages(\n",
|
||||
" assistant_role_name=assistant_role_name,\n",
|
||||
" user_role_name=user_role_name,\n",
|
||||
" task=task,\n",
|
||||
" word_limit=word_limit,\n",
|
||||
")[0]\n",
|
||||
"specified_task_msg = task_specify_agent.step(task_specifier_msg)\n",
|
||||
"print(f\"Specified task: {specified_task_msg.content}\")\n",
|
||||
"specified_task = specified_task_msg.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create inception prompts for AI assistant and AI user for role-playing"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"assistant_inception_prompt = \"\"\"Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!\n",
|
||||
"We share a common interest in collaborating to successfully complete a task.\n",
|
||||
"You must help me to complete the task.\n",
|
||||
"Here is the task: {task}. Never forget our task!\n",
|
||||
"I must instruct you based on your expertise and my needs to complete the task.\n",
|
||||
"\n",
|
||||
"I must give you one instruction at a time.\n",
|
||||
"You must write a specific solution that appropriately completes the requested instruction.\n",
|
||||
"You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.\n",
|
||||
"Do not add anything else other than your solution to my instruction.\n",
"You are never supposed to ask me any questions; you only answer questions.\n",
"You are never supposed to reply with a flake solution. Explain your solutions.\n",
|
||||
"Your solution must be declarative sentences and simple present tense.\n",
|
||||
"Unless I say the task is completed, you should always start with:\n",
|
||||
"\n",
|
||||
"Solution: <YOUR_SOLUTION>\n",
|
||||
"\n",
|
||||
"<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.\n",
|
||||
"Always end <YOUR_SOLUTION> with: Next request.\"\"\"\n",
|
||||
"\n",
|
||||
"user_inception_prompt = \"\"\"Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.\n",
|
||||
"We share a common interest in collaborating to successfully complete a task.\n",
|
||||
"I must help you to complete the task.\n",
|
||||
"Here is the task: {task}. Never forget our task!\n",
|
||||
"You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:\n",
|
||||
"\n",
|
||||
"1. Instruct with a necessary input:\n",
|
||||
"Instruction: <YOUR_INSTRUCTION>\n",
|
||||
"Input: <YOUR_INPUT>\n",
|
||||
"\n",
|
||||
"2. Instruct without any input:\n",
|
||||
"Instruction: <YOUR_INSTRUCTION>\n",
|
||||
"Input: None\n",
|
||||
"\n",
|
||||
"The \"Instruction\" describes a task or question. The paired \"Input\" provides further context or information for the requested \"Instruction\".\n",
|
||||
"\n",
|
||||
"You must give me one instruction at a time.\n",
|
||||
"I must write a response that appropriately completes the requested instruction.\n",
|
||||
"I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.\n",
"You should instruct me, not ask me questions.\n",
"Now you must start to instruct me using the two ways described above.\n",
|
||||
"Do not add anything else other than your instruction and the optional corresponding input!\n",
|
||||
"Keep giving me instructions and necessary inputs until you think the task is completed.\n",
|
||||
"When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.\n",
|
||||
"Never say <CAMEL_TASK_DONE> unless my responses have solved your task.\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
"## Create a helper function to get system messages for AI assistant and AI user from role names and the task"
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):\n",
|
||||
" assistant_sys_template = SystemMessagePromptTemplate.from_template(\n",
|
||||
" template=assistant_inception_prompt\n",
|
||||
" )\n",
|
||||
" assistant_sys_msg = assistant_sys_template.format_messages(\n",
|
||||
" assistant_role_name=assistant_role_name,\n",
|
||||
" user_role_name=user_role_name,\n",
|
||||
" task=task,\n",
|
||||
" )[0]\n",
|
||||
"\n",
|
||||
" user_sys_template = SystemMessagePromptTemplate.from_template(\n",
|
||||
" template=user_inception_prompt\n",
|
||||
" )\n",
|
||||
" user_sys_msg = user_sys_template.format_messages(\n",
|
||||
" assistant_role_name=assistant_role_name,\n",
|
||||
" user_role_name=user_role_name,\n",
|
||||
" task=task,\n",
|
||||
" )[0]\n",
|
||||
"\n",
|
||||
" return assistant_sys_msg, user_sys_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create AI assistant agent and AI user agent from obtained system messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"assistant_sys_msg, user_sys_msg = get_sys_msgs(\n",
|
||||
" assistant_role_name, user_role_name, specified_task\n",
|
||||
")\n",
|
||||
"assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))\n",
|
||||
"user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))\n",
|
||||
"\n",
|
||||
"# Reset agents\n",
|
||||
"assistant_agent.reset()\n",
|
||||
"user_agent.reset()\n",
|
||||
"\n",
|
||||
"# Initialize chats\n",
|
||||
"user_msg = HumanMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"{user_sys_msg.content}. \"\n",
" \"Now start to give me instructions one by one. \"\n",
" \"Only reply with Instruction and Input.\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assistant_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n",
|
||||
"assistant_msg = assistant_agent.step(user_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Start role-playing session to solve the task!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Original task prompt:\n",
|
||||
"Develop a trading bot for the stock market\n",
|
||||
"\n",
|
||||
"Specified task prompt:\n",
|
||||
"Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Install the necessary Python libraries for data analysis and trading.\n",
|
||||
"Input: None\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can install the necessary Python libraries using pip, a package installer for Python. We can install pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following command to install these libraries:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install pandas numpy matplotlib ta-lib\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Import the necessary libraries in the Python script.\n",
|
||||
"Input: None\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can import the necessary libraries in the Python script using the import statement. We need to import pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following code to import these libraries:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import talib as ta\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Load historical stock data into a pandas DataFrame.\n",
|
||||
"Input: The path to the CSV file containing the historical stock data.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can load historical stock data into a pandas DataFrame using the `read_csv()` function from pandas. We need to pass the path to the CSV file containing the historical stock data as an argument to this function. We can use the following code to load the historical stock data:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"df = pd.read_csv('path/to/csv/file.csv')\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will load the historical stock data into a pandas DataFrame called `df`. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date using the `set_index()` and `sort_index()` functions from pandas. We can use the following code to preprocess the historical stock data:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"df = df.set_index('date')\n",
|
||||
"df = df.sort_index(ascending=True)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will set the date column as the index and sort the DataFrame in ascending order by date. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib.\n",
|
||||
"Input: The period for the short-term moving average and the period for the long-term moving average.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib. We need to pass the period for the short-term moving average and the period for the long-term moving average as arguments to this function. We can use the following code to calculate the short-term and long-term moving averages:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"short_ma = ta.SMA(df['close'], timeperiod=short_period)\n",
|
||||
"long_ma = ta.SMA(df['close'], timeperiod=long_period)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will calculate the short-term and long-term moving averages for the stock data and store them in the `short_ma` and `long_ma` variables, respectively. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages using the `concat()` function from pandas. We need to pass the historical stock data, the short-term moving average, and the long-term moving average as arguments to this function. We can use the following code to create the new DataFrame:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"new_df = pd.concat([df, short_ma, long_ma], axis=1)\n",
|
||||
"new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a new DataFrame called `new_df` that combines the historical stock data with the short-term and long-term moving averages. The columns of the new DataFrame are named 'open', 'high', 'low', 'close', 'volume', 'short_ma', and 'long_ma'. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. We can use the following code to create the new column:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a new column called 'signal' in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. If the short-term moving average is greater than the long-term moving average, the signal is 1 (buy), otherwise the signal is -1 (sell). Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target.\n",
|
||||
"Input: The stop loss and profit target as percentages.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. We need to pass the stop loss and profit target as percentages as arguments to this function. We can use the following code to create the new column:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"stop_loss = stop_loss_percent / 100\n",
|
||||
"profit_target = profit_target_percent / 100\n",
|
||||
"\n",
|
||||
"new_df['pnl'] = 0.0\n",
|
||||
"buy_price = 0.0\n",
|
||||
"for i in range(1, len(new_df)):\n",
|
||||
" if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n",
|
||||
" buy_price = new_df['close'][i]\n",
|
||||
" elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n",
|
||||
" sell_price = new_df['close'][i]\n",
|
||||
" if sell_price <= buy_price * (1 - stop_loss):\n",
|
||||
" new_df['pnl'][i] = -stop_loss\n",
|
||||
" elif sell_price >= buy_price * (1 + profit_target):\n",
|
||||
" new_df['pnl'][i] = profit_target\n",
|
||||
" else:\n",
|
||||
" new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a new column called 'pnl' in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. The stop loss and profit target are calculated based on the stop_loss_percent and profit_target_percent variables, respectively. The buy and sell prices are stored in the buy_price and sell_price variables, respectively. If the sell price is less than or equal to the stop loss, the profit or loss is set to -stop_loss. If the sell price is greater than or equal to the profit target, the profit or loss is set to profit_target. Otherwise, the profit or loss is calculated as (sell_price - buy_price) / buy_price. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Calculate the total profit or loss for all trades.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can calculate the total profit or loss for all trades by summing the values in the 'pnl' column of the DataFrame. We can use the following code to calculate the total profit or loss:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"total_pnl = new_df['pnl'].sum()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will calculate the total profit or loss for all trades and store it in the total_pnl variable. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Visualize the stock data, short-term moving average, and long-term moving average using a line chart.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can visualize the stock data, short-term moving average, and long-term moving average using a line chart using the `plot()` function from pandas. We can use the following code to visualize the data:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n",
|
||||
"plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Stock Data with Moving Averages')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a line chart that visualizes the stock data, short-term moving average, and long-term moving average. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each line. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Visualize the buy and sell signals using a scatter plot.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can visualize the buy and sell signals using a scatter plot using the `scatter()` function from matplotlib. We can use the following code to visualize the signals:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"buy_signals = new_df[new_df['signal'] == 1]\n",
|
||||
"sell_signals = new_df[new_df['signal'] == -1]\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n",
|
||||
"plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Buy and Sell Signals')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a scatter plot that visualizes the buy and sell signals. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each point. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Print the total profit or loss for all trades.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can print the total profit or loss for all trades using the `print()` function. We can use the following code to print the total profit or loss:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will print the total profit or loss for all trades in percentage format with two decimal places. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Finalize the code by putting all the code snippets together in a single Python script.\n",
|
||||
"Input: The necessary inputs for each code snippet.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: Here is the complete Python script that puts all the code snippets together:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import talib as ta\n",
|
||||
"\n",
|
||||
"# Load historical stock data into a pandas DataFrame\n",
|
||||
"df = pd.read_csv('path/to/csv/file.csv')\n",
|
||||
"\n",
|
||||
"# Preprocess the historical stock data\n",
|
||||
"df = df.set_index('date')\n",
|
||||
"df = df.sort_index(ascending=True)\n",
|
||||
"\n",
|
||||
"# Calculate the short-term and long-term moving averages\n",
|
||||
"short_period = 10\n",
|
||||
"long_period = 30\n",
|
||||
"short_ma = ta.SMA(df['close'], timeperiod=short_period)\n",
|
||||
"long_ma = ta.SMA(df['close'], timeperiod=long_period)\n",
|
||||
"\n",
|
||||
"# Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages\n",
|
||||
"new_df = pd.concat([df, short_ma, long_ma], axis=1)\n",
|
||||
"new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n",
|
||||
"\n",
|
||||
"# Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages\n",
|
||||
"new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n",
|
||||
"\n",
|
||||
"# Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target\n",
|
||||
"stop_loss_percent = 5\n",
|
||||
"profit_target_percent = 10\n",
|
||||
"stop_loss = stop_loss_percent / 100\n",
|
||||
"profit_target = profit_target_percent / 100\n",
|
||||
"new_df['pnl'] = 0.0\n",
|
||||
"buy_price = 0.0\n",
|
||||
"for i in range(1, len(new_df)):\n",
|
||||
" if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n",
|
||||
" buy_price = new_df['close'][i]\n",
|
||||
" elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n",
|
||||
" sell_price = new_df['close'][i]\n",
|
||||
" if sell_price <= buy_price * (1 - stop_loss):\n",
|
||||
" new_df['pnl'][i] = -stop_loss\n",
|
||||
" elif sell_price >= buy_price * (1 + profit_target):\n",
|
||||
" new_df['pnl'][i] = profit_target\n",
|
||||
" else:\n",
|
||||
" new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n",
|
||||
"\n",
|
||||
"# Calculate the total profit or loss for all trades\n",
|
||||
"total_pnl = new_df['pnl'].sum()\n",
|
||||
"\n",
|
||||
"# Visualize the stock data, short-term moving average, and long-term moving average using a line chart\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n",
|
||||
"plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Stock Data with Moving Averages')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"# Visualize the buy and sell signals using a scatter plot\n",
|
||||
"buy_signals = new_df[new_df['signal'] == 1]\n",
|
||||
"sell_signals = new_df[new_df['signal'] == -1]\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n",
|
||||
"plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Buy and Sell Signals')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"# Print the total profit or loss for all trades\n",
|
||||
"print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You need to replace the path/to/csv/file.csv with the actual path to the CSV file containing the historical stock data. You can also adjust the short_period, long_period, stop_loss_percent, and profit_target_percent variables to suit your needs.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"<CAMEL_TASK_DONE>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Great! Let me know if you need any further assistance.\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(f\"Original task prompt:\\n{task}\\n\")\n",
|
||||
"print(f\"Specified task prompt:\\n{specified_task}\\n\")\n",
|
||||
"\n",
|
||||
"chat_turn_limit, n = 30, 0\n",
|
||||
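"# Alternate turns until the user agent signals <CAMEL_TASK_DONE> or the turn limit is reached\n",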
"while n < chat_turn_limit:\n",
|
||||
" n += 1\n",
|
||||
" user_ai_msg = user_agent.step(assistant_msg)\n",
|
||||
" user_msg = HumanMessage(content=user_ai_msg.content)\n",
|
||||
" print(f\"AI User ({user_role_name}):\\n\\n{user_msg.content}\\n\\n\")\n",
|
||||
"\n",
|
||||
" assistant_ai_msg = assistant_agent.step(user_msg)\n",
|
||||
" assistant_msg = HumanMessage(content=assistant_ai_msg.content)\n",
|
||||
" print(f\"AI Assistant ({assistant_role_name}):\\n\\n{assistant_msg.content}\\n\\n\")\n",
|
||||
" if \"<CAMEL_TASK_DONE>\" in user_msg.content:\n",
|
||||
" break"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "camel",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
692
cookbook/causal_program_aided_language_model.ipynb
Normal file
File diff suppressed because one or more lines are too long
1180
cookbook/code-analysis-deeplake.ipynb
Normal file
File diff suppressed because it is too large
@@ -9,8 +9,8 @@
|
||||
"\n",
|
||||
"This notebook combines two concepts in order to build a custom agent that can interact with AI Plugins:\n",
|
||||
"\n",
|
||||
"1. [Custom Agent with Retrieval](../../modules/agents/agents/custom_agent_with_plugin_retrieval.ipynb): This introduces the concept of retrieving many tools, which is useful when trying to work with arbitrarily many plugins.\n",
|
||||
"2. [Natural Language API Chains](../../modules/chains/examples/openapi.ipynb): This creates Natural Language wrappers around OpenAPI endpoints. This is useful because (1) plugins use OpenAPI endpoints under the hood, (2) wrapping them in an NLAChain allows the router agent to call it more easily.\n",
|
||||
"1. [Custom Agent with Tool Retrieval](/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.html): This introduces the concept of retrieving many tools, which is useful when trying to work with arbitrarily many plugins.\n",
|
||||
"2. [Natural Language API Chains](/docs/use_cases/apis/openapi.html): This creates Natural Language wrappers around OpenAPI endpoints. This is useful because (1) plugins use OpenAPI endpoints under the hood, (2) wrapping them in an NLAChain allows the router agent to call it more easily.\n",
|
||||
"\n",
|
||||
"The novel idea introduced in this notebook is the idea of using retrieval to select not the tools explicitly, but the set of OpenAPI specs to use. We can then generate tools from those OpenAPI specs. The use case for this is when trying to get agents to use plugins. It may be more efficient to choose plugins first, then the endpoints, rather than the endpoints directly. This is because the plugins may contain more useful information for selection."
|
||||
]
|
||||
@@ -32,14 +32,20 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
|
||||
"from typing import List, Union\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"import re\n",
|
||||
"from typing import Union\n",
|
||||
"\n",
|
||||
"from langchain.agents import (\n",
|
||||
" AgentExecutor,\n",
|
||||
" AgentOutputParser,\n",
|
||||
" LLMSingleActionAgent,\n",
|
||||
")\n",
|
||||
"from langchain.agents.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain.tools.plugin import AIPlugin\n",
|
||||
"import re"
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain.tools.plugin import AIPlugin"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -108,9 +114,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.schema import Document"
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain.vectorstores import FAISS"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -138,15 +144,17 @@
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docs = [\n",
|
||||
" Document(page_content=plugin.description_for_model, \n",
|
||||
" metadata={\"plugin_name\": plugin.name_for_model}\n",
|
||||
" )\n",
|
||||
" Document(\n",
|
||||
" page_content=plugin.description_for_model,\n",
|
||||
" metadata={\"plugin_name\": plugin.name_for_model},\n",
|
||||
" )\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"]\n",
|
||||
"vector_store = FAISS.from_documents(docs, embeddings)\n",
|
||||
"toolkits_dict = {plugin.name_for_model: \n",
|
||||
" NLAToolkit.from_llm_and_ai_plugin(llm, plugin) \n",
|
||||
" for plugin in AI_PLUGINS}"
|
||||
"toolkits_dict = {\n",
|
||||
" plugin.name_for_model: NLAToolkit.from_llm_and_ai_plugin(llm, plugin)\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -158,6 +166,7 @@
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
@@ -303,6 +312,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Set up a prompt template\n",
|
||||
"class CustomPromptTemplate(StringPromptTemplate):\n",
|
||||
" # The template to use\n",
|
||||
@@ -310,7 +321,7 @@
|
||||
" ############## NEW ######################\n",
|
||||
" # The list of tools available\n",
|
||||
" tools_getter: Callable\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def format(self, **kwargs) -> str:\n",
|
||||
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
|
||||
" # Format them in a particular way\n",
|
||||
@@ -324,7 +335,9 @@
|
||||
" ############## NEW ######################\n",
|
||||
" tools = self.tools_getter(kwargs[\"input\"])\n",
|
||||
" # Create a tools variable from the list of tools provided\n",
|
||||
" kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in tools])\n",
|
||||
" kwargs[\"tools\"] = \"\\n\".join(\n",
|
||||
" [f\"{tool.name}: {tool.description}\" for tool in tools]\n",
|
||||
" )\n",
|
||||
" # Create a list of tool names for the tools provided\n",
|
||||
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in tools])\n",
|
||||
" return self.template.format(**kwargs)"
|
||||
@@ -342,7 +355,7 @@
|
||||
" tools_getter=get_tools,\n",
|
||||
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
|
||||
" # This includes the `intermediate_steps` variable because that is needed\n",
|
||||
" input_variables=[\"input\", \"intermediate_steps\"]\n",
|
||||
" input_variables=[\"input\", \"intermediate_steps\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -364,7 +377,6 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CustomOutputParser(AgentOutputParser):\n",
|
||||
" \n",
|
||||
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
|
||||
" # Check if agent should finish\n",
|
||||
" if \"Final Answer:\" in llm_output:\n",
|
||||
@@ -382,7 +394,9 @@
|
||||
" action = match.group(1).strip()\n",
|
||||
" action_input = match.group(2)\n",
|
||||
" # Return the action and action input\n",
|
||||
" return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
|
||||
" return AgentAction(\n",
|
||||
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -435,10 +449,10 @@
|
||||
"source": [
|
||||
"tool_names = [tool.name for tool in tools]\n",
|
||||
"agent = LLMSingleActionAgent(\n",
|
||||
" llm_chain=llm_chain, \n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" output_parser=output_parser,\n",
|
||||
" stop=[\"\\nObservation:\"], \n",
|
||||
" allowed_tools=tool_names\n",
|
||||
" stop=[\"\\nObservation:\"],\n",
|
||||
" allowed_tools=tool_names,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -459,7 +473,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
|
||||
" agent=agent, tools=tools, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -525,7 +541,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Plug-and-Plai\n",
|
||||
"\n",
|
||||
"This notebook builds upon the idea of [tool retrieval](custom_agent_with_plugin_retrieval.html), but pulls all tools from `plugnplai` - a directory of AI Plugins."
|
||||
"This notebook builds upon the idea of [plugin retrieval](./custom_agent_with_plugin_retrieval.html), but pulls all tools from `plugnplai` - a directory of AI Plugins."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -56,15 +56,21 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
|
||||
"from typing import List, Union\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain.agents.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain.tools.plugin import AIPlugin\n",
|
||||
"import re\n",
|
||||
"import plugnplai"
|
||||
"from typing import Union\n",
|
||||
"\n",
|
||||
"import plugnplai\n",
|
||||
"from langchain.agents import (\n",
|
||||
" AgentExecutor,\n",
|
||||
" AgentOutputParser,\n",
|
||||
" LLMSingleActionAgent,\n",
|
||||
")\n",
|
||||
"from langchain.agents.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain.tools.plugin import AIPlugin"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -106,10 +112,10 @@
|
||||
"urls = plugnplai.get_plugins()\n",
|
||||
"\n",
|
||||
"# Get ChatGPT plugins - only ChatGPT verified plugins\n",
|
||||
"urls = plugnplai.get_plugins(filter = 'ChatGPT')\n",
|
||||
"urls = plugnplai.get_plugins(filter=\"ChatGPT\")\n",
|
||||
"\n",
|
||||
"# Get working plugins - only tested plugins (in progress)\n",
|
||||
"urls = plugnplai.get_plugins(filter = 'working')\n",
|
||||
"urls = plugnplai.get_plugins(filter=\"working\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI_PLUGINS = [AIPlugin.from_url(url + \"/.well-known/ai-plugin.json\") for url in urls]"
|
||||
@@ -132,9 +138,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.schema import Document"
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain.vectorstores import FAISS"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -162,15 +168,17 @@
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docs = [\n",
|
||||
" Document(page_content=plugin.description_for_model, \n",
|
||||
" metadata={\"plugin_name\": plugin.name_for_model}\n",
|
||||
" )\n",
|
||||
" Document(\n",
|
||||
" page_content=plugin.description_for_model,\n",
|
||||
" metadata={\"plugin_name\": plugin.name_for_model},\n",
|
||||
" )\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"]\n",
|
||||
"vector_store = FAISS.from_documents(docs, embeddings)\n",
|
||||
"toolkits_dict = {plugin.name_for_model: \n",
|
||||
" NLAToolkit.from_llm_and_ai_plugin(llm, plugin) \n",
|
||||
" for plugin in AI_PLUGINS}"
|
||||
"toolkits_dict = {\n",
|
||||
" plugin.name_for_model: NLAToolkit.from_llm_and_ai_plugin(llm, plugin)\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -182,6 +190,7 @@
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.get_relevant_documents(query)\n",
|
||||
@@ -327,6 +336,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Set up a prompt template\n",
|
||||
"class CustomPromptTemplate(StringPromptTemplate):\n",
|
||||
" # The template to use\n",
|
||||
@@ -334,7 +345,7 @@
|
||||
" ############## NEW ######################\n",
|
||||
" # The list of tools available\n",
|
||||
" tools_getter: Callable\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def format(self, **kwargs) -> str:\n",
|
||||
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
|
||||
" # Format them in a particular way\n",
|
||||
@@ -348,7 +359,9 @@
|
||||
" ############## NEW ######################\n",
|
||||
" tools = self.tools_getter(kwargs[\"input\"])\n",
|
||||
" # Create a tools variable from the list of tools provided\n",
|
||||
" kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in tools])\n",
|
||||
" kwargs[\"tools\"] = \"\\n\".join(\n",
|
||||
" [f\"{tool.name}: {tool.description}\" for tool in tools]\n",
|
||||
" )\n",
|
||||
" # Create a list of tool names for the tools provided\n",
|
||||
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in tools])\n",
|
||||
" return self.template.format(**kwargs)"
|
||||
@@ -366,7 +379,7 @@
|
||||
" tools_getter=get_tools,\n",
|
||||
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
|
||||
" # This includes the `intermediate_steps` variable because that is needed\n",
|
||||
" input_variables=[\"input\", \"intermediate_steps\"]\n",
|
||||
" input_variables=[\"input\", \"intermediate_steps\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -388,7 +401,6 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CustomOutputParser(AgentOutputParser):\n",
|
||||
" \n",
|
||||
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
|
||||
" # Check if agent should finish\n",
|
||||
" if \"Final Answer:\" in llm_output:\n",
|
||||
@@ -406,7 +418,9 @@
|
||||
" action = match.group(1).strip()\n",
|
||||
" action_input = match.group(2)\n",
|
||||
" # Return the action and action input\n",
|
||||
" return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
|
||||
" return AgentAction(\n",
|
||||
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -459,10 +473,10 @@
|
||||
"source": [
|
||||
"tool_names = [tool.name for tool in tools]\n",
|
||||
"agent = LLMSingleActionAgent(\n",
|
||||
" llm_chain=llm_chain, \n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" output_parser=output_parser,\n",
|
||||
" stop=[\"\\nObservation:\"], \n",
|
||||
" allowed_tools=tool_names\n",
|
||||
" stop=[\"\\nObservation:\"],\n",
|
||||
" allowed_tools=tool_names,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -483,7 +497,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
|
||||
" agent=agent, tools=tools, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -549,7 +565,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
273
cookbook/databricks_sql_db.ipynb
Normal file
@@ -0,0 +1,273 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "707d13a7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Databricks\n",
|
||||
"\n",
|
||||
"This notebook covers how to connect to the [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the SQLDatabase wrapper of LangChain.\n",
|
||||
"It is broken into 3 parts: installation and setup, connecting to Databricks, and examples."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0076d072",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation and Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "739b489b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install databricks-sql-connector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73113163",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Connecting to Databricks\n",
|
||||
"\n",
|
||||
"You can connect to [Databricks runtimes](https://docs.databricks.com/runtime/index.html) and [Databricks SQL](https://www.databricks.com/product/databricks-sql) using the `SQLDatabase.from_databricks()` method.\n",
|
||||
"\n",
|
||||
"### Syntax\n",
|
||||
"```python\n",
|
||||
"SQLDatabase.from_databricks(\n",
|
||||
" catalog: str,\n",
|
||||
" schema: str,\n",
|
||||
" host: Optional[str] = None,\n",
|
||||
" api_token: Optional[str] = None,\n",
|
||||
" warehouse_id: Optional[str] = None,\n",
|
||||
" cluster_id: Optional[str] = None,\n",
|
||||
" engine_args: Optional[dict] = None,\n",
|
||||
" **kwargs: Any)\n",
|
||||
"```\n",
|
||||
"### Required Parameters\n",
|
||||
"* `catalog`: The catalog name in the Databricks database.\n",
|
||||
"* `schema`: The schema name in the catalog.\n",
|
||||
"\n",
|
||||
"### Optional Parameters\n",
|
||||
"There following parameters are optional. When executing the method in a Databricks notebook, you don't need to provide them in most of the cases.\n",
|
||||
"* `host`: The Databricks workspace hostname, excluding 'https://' part. Defaults to 'DATABRICKS_HOST' environment variable or current workspace if in a Databricks notebook.\n",
|
||||
"* `api_token`: The Databricks personal access token for accessing the Databricks SQL warehouse or the cluster. Defaults to 'DATABRICKS_TOKEN' environment variable or a temporary one is generated if in a Databricks notebook.\n",
|
||||
"* `warehouse_id`: The warehouse ID in the Databricks SQL.\n",
|
||||
"* `cluster_id`: The cluster ID in the Databricks Runtime. If running in a Databricks notebook and both 'warehouse_id' and 'cluster_id' are None, it uses the ID of the cluster the notebook is attached to.\n",
|
||||
"* `engine_args`: The arguments to be used when connecting Databricks.\n",
|
||||
"* `**kwargs`: Additional keyword arguments for the `SQLDatabase.from_uri` method."
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b11c7e48",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Examples"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8102bca0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Connecting to Databricks with SQLDatabase wrapper\n",
|
||||
"from langchain.utilities import SQLDatabase\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_databricks(catalog=\"samples\", schema=\"nyctaxi\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "9dd36f58",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
"# Creating an OpenAI Chat LLM wrapper\n",
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5b5c5f1a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### SQL Chain example\n",
|
||||
"\n",
|
||||
"This example demonstrates the use of the [SQL Chain](https://python.langchain.com/en/latest/modules/chains/examples/sqlite.html) for answering a question over a Databricks database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "36f2270b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
"from langchain_experimental.sql import SQLDatabaseChain\n",
"\n",
|
||||
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "4e2b5f25",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new SQLDatabaseChain chain...\u001b[0m\n",
|
||||
"What is the average duration of taxi rides that start between midnight and 6am?\n",
|
||||
"SQLQuery:\u001b[32;1m\u001b[1;3mSELECT AVG(UNIX_TIMESTAMP(tpep_dropoff_datetime) - UNIX_TIMESTAMP(tpep_pickup_datetime)) as avg_duration\n",
|
||||
"FROM trips\n",
|
||||
"WHERE HOUR(tpep_pickup_datetime) >= 0 AND HOUR(tpep_pickup_datetime) < 6\u001b[0m\n",
|
||||
"SQLResult: \u001b[33;1m\u001b[1;3m[(987.8122786304605,)]\u001b[0m\n",
|
||||
"Answer:\u001b[32;1m\u001b[1;3mThe average duration of taxi rides that start between midnight and 6am is 987.81 seconds.\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The average duration of taxi rides that start between midnight and 6am is 987.81 seconds.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db_chain.run(\n",
|
||||
" \"What is the average duration of taxi rides that start between midnight and 6am?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e496d5e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### SQL Database Agent example\n",
|
||||
"\n",
|
||||
"This example demonstrates the use of the [SQL Database Agent](/docs/integrations/toolkits/sql_database.html) for answering questions over a Databricks database."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "9918e86a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import create_sql_agent\n",
|
||||
"from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n",
|
||||
"\n",
|
||||
"toolkit = SQLDatabaseToolkit(db=db, llm=llm)\n",
|
||||
"agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "c484a76e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction: list_tables_sql_db\n",
|
||||
"Action Input: \u001b[0m\n",
|
||||
"Observation: \u001b[38;5;200m\u001b[1;3mtrips\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI should check the schema of the trips table to see if it has the necessary columns for trip distance and duration.\n",
|
||||
"Action: schema_sql_db\n",
|
||||
"Action Input: trips\u001b[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3m\n",
|
||||
"CREATE TABLE trips (\n",
|
||||
"\ttpep_pickup_datetime TIMESTAMP, \n",
|
||||
"\ttpep_dropoff_datetime TIMESTAMP, \n",
|
||||
"\ttrip_distance FLOAT, \n",
|
||||
"\tfare_amount FLOAT, \n",
|
||||
"\tpickup_zip INT, \n",
|
||||
"\tdropoff_zip INT\n",
|
||||
") USING DELTA\n",
|
||||
"\n",
|
||||
"/*\n",
|
||||
"3 rows from trips table:\n",
|
||||
"tpep_pickup_datetime\ttpep_dropoff_datetime\ttrip_distance\tfare_amount\tpickup_zip\tdropoff_zip\n",
|
||||
"2016-02-14 16:52:13+00:00\t2016-02-14 17:16:04+00:00\t4.94\t19.0\t10282\t10171\n",
|
||||
"2016-02-04 18:44:19+00:00\t2016-02-04 18:46:00+00:00\t0.28\t3.5\t10110\t10110\n",
|
||||
"2016-02-17 17:13:57+00:00\t2016-02-17 17:17:55+00:00\t0.7\t5.0\t10103\t10023\n",
|
||||
"*/\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe trips table has the necessary columns for trip distance and duration. I will write a query to find the longest trip distance and its duration.\n",
|
||||
"Action: query_checker_sql_db\n",
|
||||
"Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001b[0m\n",
|
||||
"Observation: \u001b[31;1m\u001b[1;3mSELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe query is correct. I will now execute it to find the longest trip distance and its duration.\n",
|
||||
"Action: query_sql_db\n",
|
||||
"Action Input: SELECT trip_distance, tpep_dropoff_datetime - tpep_pickup_datetime as duration FROM trips ORDER BY trip_distance DESC LIMIT 1\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m[(30.6, '0 00:43:31.000000000')]\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mI now know the final answer.\n",
|
||||
"Final Answer: The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The longest trip distance is 30.6 miles and it took 43 minutes and 31 seconds.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"What is the longest trip distance and how long did it take?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
256
cookbook/deeplake_semantic_search_over_chat.ipynb
Normal file
@@ -0,0 +1,256 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# QA using Activeloop's DeepLake\n",
"In this tutorial, we are going to use LangChain + Activeloop's Deep Lake with GPT-4 to semantically search and ask questions over a group chat.\n",
"\n",
|
||||
"View a working demo [here](https://twitter.com/thisissukh_/status/1647223328363679745)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Install required packages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!python3 -m pip install --upgrade langchain 'deeplake[enterprise]' openai tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Add API keys"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.text_splitter import (\n",
|
||||
" CharacterTextSplitter,\n",
|
||||
" RecursiveCharacterTextSplitter,\n",
|
||||
")\n",
|
||||
"from langchain.vectorstores import DeepLake\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
|
||||
"os.environ[\"ACTIVELOOP_TOKEN\"] = activeloop_token\n",
|
||||
"os.environ[\"ACTIVELOOP_ORG\"] = getpass.getpass(\"Activeloop Org:\")\n",
|
||||
"\n",
|
||||
"org_id = os.environ[\"ACTIVELOOP_ORG\"]\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"\n",
|
||||
"dataset_path = \"hub://\" + org_id + \"/data\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"## 2. Create sample data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can generate a sample group chat conversation using ChatGPT with this prompt:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"Generate a group chat conversation with three friends talking about their day, referencing real places and fictional names. Make it funny and as detailed as possible.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"I've already generated such a chat in `messages.txt`. We can keep it simple and use this for our example.\n",
|
||||
"\n",
|
||||
"## 3. Ingest chat embeddings\n",
|
||||
"\n",
|
||||
"We load the messages in the text file, chunk and upload to ActiveLoop Vector store."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='Participants:\\n\\nJerry: Loves movies and is a bit of a klutz.\\nSamantha: Enthusiastic about food and always trying new restaurants.\\nBarry: A nature lover, but always manages to get lost.\\nJerry: Hey, guys! You won\\'t believe what happened to me at the Times Square AMC theater. I tripped over my own feet and spilled popcorn everywhere! 🍿💥\\n\\nSamantha: LOL, that\\'s so you, Jerry! Was the floor buttery enough for you to ice skate on after that? 😂\\n\\nBarry: Sounds like a regular Tuesday for you, Jerry. Meanwhile, I tried to find that new hiking trail in Central Park. You know, the one that\\'s supposed to be impossible to get lost on? Well, guess what...\\n\\nJerry: You found a hidden treasure?\\n\\nBarry: No, I got lost. AGAIN. 🧭🙄\\n\\nSamantha: Barry, you\\'d get lost in your own backyard! But speaking of treasures, I found this new sushi place in Little Tokyo. \"Samantha\\'s Sushi Symphony\" it\\'s called. Coincidence? I think not!\\n\\nJerry: Maybe they named it after your ability to eat your body weight in sushi. 🍣', metadata={}), Document(page_content='Barry: How do you even FIND all these places, Samantha?\\n\\nSamantha: Simple, I don\\'t rely on Barry\\'s navigation skills. 😉 But seriously, the wasabi there was hotter than Jerry\\'s love for Marvel movies!\\n\\nJerry: Hey, nothing wrong with a little superhero action. By the way, did you guys see the new \"Captain Crunch: Breakfast Avenger\" trailer?\\n\\nSamantha: Captain Crunch? Are you sure you didn\\'t get that from one of your Saturday morning cereal binges?\\n\\nBarry: Yeah, and did he defeat his arch-enemy, General Mills? 😆\\n\\nJerry: Ha-ha, very funny. Anyway, that sushi place sounds awesome, Samantha. Next time, let\\'s go together, and maybe Barry can guide us... if we want a city-wide tour first.\\n\\nBarry: As long as we\\'re not hiking, I\\'ll get us there... eventually. 😅\\n\\nSamantha: It\\'s a date! But Jerry, you\\'re banned from carrying any food items.\\n\\nJerry: Deal! Just promise me no wasabi challenges. I don\\'t want to end up like the time I tried Sriracha ice cream.', metadata={}), Document(page_content=\"Barry: Wait, what happened with Sriracha ice cream?\\n\\nJerry: Let's just say it was a hot situation. Literally. 🔥\\n\\nSamantha: 🤣 I still have the video!\\n\\nJerry: Samantha, if you value our friendship, that video will never see the light of day.\\n\\nSamantha: No promises, Jerry. No promises. 🤐😈\\n\\nBarry: I foresee a fun weekend ahead! 🎉\", metadata={})]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Your Deep Lake dataset has been successfully created!\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\\"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dataset(path='hub://adilkhan/data', tensors=['embedding', 'id', 'metadata', 'text'])\n",
|
||||
"\n",
|
||||
" tensor htype shape dtype compression\n",
|
||||
" ------- ------- ------- ------- ------- \n",
|
||||
" embedding embedding (3, 1536) float32 None \n",
|
||||
" id text (3, 1) str None \n",
|
||||
" metadata json (3, 1) str None \n",
|
||||
" text text (3, 1) str None \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" \r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with open(\"messages.txt\") as f:\n",
|
||||
" state_of_the_union = f.read()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"pages = text_splitter.split_text(state_of_the_union)\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.create_documents(pages)\n",
|
||||
"\n",
|
||||
"print(texts)\n",
|
||||
"\n",
|
||||
"dataset_path = \"hub://\" + org_id + \"/data\"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"db = DeepLake.from_documents(\n",
|
||||
" texts, embeddings, dataset_path=dataset_path, overwrite=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Optional`: You can also use Deep Lake's Managed Tensor Database as a hosting service and run queries there. In order to do so, it is necessary to specify the runtime parameter as {'tensor_db': True} during the creation of the vector store. This configuration enables the execution of queries on the Managed Tensor Database, rather than on the client side. It should be noted that this functionality is not applicable to datasets stored locally or in-memory. In the event that a vector store has already been created outside of the Managed Tensor Database, it is possible to transfer it to the Managed Tensor Database by following the prescribed steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# with open(\"messages.txt\") as f:\n",
|
||||
"# state_of_the_union = f.read()\n",
|
||||
"# text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"# pages = text_splitter.split_text(state_of_the_union)\n",
|
||||
"\n",
|
||||
"# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)\n",
|
||||
"# texts = text_splitter.create_documents(pages)\n",
|
||||
"\n",
|
||||
"# print(texts)\n",
|
||||
"\n",
|
||||
"# dataset_path = \"hub://\" + org + \"/data\"\n",
|
||||
"# embeddings = OpenAIEmbeddings()\n",
|
||||
"# db = DeepLake.from_documents(\n",
|
||||
"# texts, embeddings, dataset_path=dataset_path, overwrite=True, runtime={\"tensor_db\": True}\n",
|
||||
"# )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4. Ask questions\n",
|
||||
"\n",
|
||||
"Now we can ask a question and get an answer back with a semantic search:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = DeepLake(dataset_path=dataset_path, read_only=True, embedding=embeddings)\n",
|
||||
"\n",
|
||||
"retriever = db.as_retriever()\n",
|
||||
"retriever.search_kwargs[\"distance_metric\"] = \"cos\"\n",
|
||||
"retriever.search_kwargs[\"k\"] = 4\n",
|
||||
"\n",
|
||||
"qa = RetrievalQA.from_chain_type(\n",
|
||||
" llm=OpenAI(), chain_type=\"stuff\", retriever=retriever, return_source_documents=False\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# What was the restaurant the group was talking about called?\n",
|
||||
"query = input(\"Enter query:\")\n",
|
||||
"\n",
|
||||
"# The Hungry Lobster\n",
|
||||
"ans = qa({\"query\": query})\n",
|
||||
"\n",
|
||||
"print(ans)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
156
cookbook/elasticsearch_db_qa.ipynb
Normal file
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Elasticsearch\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/qa_structured/integrations/elasticsearch.ipynb)\n",
|
||||
"\n",
|
||||
"We can use LLMs to interact with Elasticsearch analytics databases in natural language.\n",
|
||||
"\n",
|
||||
"This chain builds search queries via the Elasticsearch DSL API (filters and aggregations).\n",
|
||||
"\n",
|
||||
"The Elasticsearch client must have permissions for index listing, mapping description and search queries.\n",
|
||||
"\n",
|
||||
"See [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html) for instructions on how to run Elasticsearch locally."
|
||||
]
|
||||
},
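For intuition about what the chain generates, here is a hand-written sketch of an Elasticsearch DSL request that combines a filter with an aggregation. This is illustrative only: it assumes the elasticsearch-py 8.x client, the `customers` index created further down in this notebook, and the `lastname.keyword` sub-field that Elasticsearch's default dynamic mapping would create.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("https://elastic:pass@localhost:9200")

# A filter (match on firstname) plus an aggregation (distinct last names),
# written out by hand the way the chain would generate it:
resp = es.search(
    index="customers",
    query={"match": {"firstname": "Jennifer"}},
    aggregations={"unique_lastnames": {"terms": {"field": "lastname.keyword"}}},
    size=10,
)
print([hit["_source"] for hit in resp["hits"]["hits"]])
print(resp["aggregations"]["unique_lastnames"]["buckets"])
```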
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain langchain-experimental openai elasticsearch\n",
|
||||
"\n",
|
||||
"# Set env var OPENAI_API_KEY or load from a .env file\n",
|
||||
"# import dotenv\n",
|
||||
"\n",
|
||||
"# dotenv.load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from elasticsearch import Elasticsearch\n",
|
||||
"from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n",
|
||||
"from langchain.chat_models import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize Elasticsearch python client.\n",
|
||||
"# See https://elasticsearch-py.readthedocs.io/en/v8.8.2/api.html#elasticsearch.Elasticsearch\n",
|
||||
"ELASTIC_SEARCH_SERVER = \"https://elastic:pass@localhost:9200\"\n",
|
||||
"db = Elasticsearch(ELASTIC_SEARCH_SERVER)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Uncomment the next cell to initially populate your db."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# customers = [\n",
|
||||
"# {\"firstname\": \"Jennifer\", \"lastname\": \"Walters\"},\n",
|
||||
"# {\"firstname\": \"Monica\",\"lastname\":\"Rambeau\"},\n",
|
||||
"# {\"firstname\": \"Carol\",\"lastname\":\"Danvers\"},\n",
|
||||
"# {\"firstname\": \"Wanda\",\"lastname\":\"Maximoff\"},\n",
|
||||
"# {\"firstname\": \"Jennifer\",\"lastname\":\"Takeda\"},\n",
|
||||
"# ]\n",
|
||||
"# for i, customer in enumerate(customers):\n",
|
||||
"# db.create(index=\"customers\", document=customer, id=i)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
|
||||
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What are the first names of all the customers?\"\n",
|
||||
"chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can customize the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"\n",
|
||||
"PROMPT_TEMPLATE = \"\"\"Given an input question, create a syntactically correct Elasticsearch query to run. Unless the user specifies in their question a specific number of examples they wish to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.\n",
|
||||
"\n",
|
||||
"Unless told to do not query for all the columns from a specific index, only ask for a the few relevant columns given the question.\n",
|
||||
"\n",
|
||||
"Pay attention to use only the column names that you can see in the mapping description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which index. Return the query as valid json.\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: Question here\n",
|
||||
"ESQuery: Elasticsearch Query formatted as json\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"PROMPT = PromptTemplate.from_template(\n",
|
||||
" PROMPT_TEMPLATE,\n",
|
||||
")\n",
|
||||
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, query_prompt=PROMPT)"
|
||||
]
|
||||
}
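With the custom prompt in place, the chain is used exactly as before; a minimal usage sketch, assuming the `customers` index populated earlier:

```python
# Same interface as the default chain; only the query-generation prompt differs.
chain.run("What are the last names of all the customers?")
```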
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
214
cookbook/extraction_openai_tools.ipynb
Normal file
@@ -0,0 +1,214 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2def22ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Extraction with OpenAI Tools\n",
|
||||
"\n",
|
||||
"Performing extraction has never been easier! OpenAI's tool calling ability is the perfect thing to use as it allows for extracting multiple different elements from text that are different types. \n",
|
||||
"\n",
|
||||
"Models after 1106 use tools and support \"parallel function calling\" which makes this super easy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "5c628496",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List, Optional\n",
|
||||
"\n",
|
||||
"from langchain.chains.openai_tools import create_extraction_chain_pydantic\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.pydantic_v1 import BaseModel"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "afe9657b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Make sure to use a recent model that supports tools\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "bc0ca3b6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Pydantic is an easy way to define a schema\n",
|
||||
"class Person(BaseModel):\n",
|
||||
" \"\"\"Information about people to extract.\"\"\"\n",
|
||||
"\n",
|
||||
" name: str\n",
|
||||
" age: Optional[int] = None"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "2036af68",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain_pydantic(Person, model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "1748ad21",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Person(name='jane', age=2), Person(name='bob', age=3)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"input\": \"jane is 2 and bob is 3\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "c8262ce5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Let's define another element\n",
|
||||
"class Class(BaseModel):\n",
|
||||
" \"\"\"Information about classes to extract.\"\"\"\n",
|
||||
"\n",
|
||||
" teacher: str\n",
|
||||
" students: List[str]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "4973c104",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain_pydantic([Person, Class], model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "e976a15e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Person(name='jane', age=2),\n",
|
||||
" Person(name='bob', age=3),\n",
|
||||
" Class(teacher='Mrs Sampson', students=['jane', 'bob'])]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"input\": \"jane is 2 and bob is 3 and they are in Mrs Sampson's class\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6575a7d6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Under the hood\n",
|
||||
"\n",
|
||||
"Under the hood, this is a simple chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b8ba83e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```python\n",
|
||||
"from typing import Union, List, Type, Optional\n",
|
||||
"\n",
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
|
||||
"from langchain.schema.runnable import Runnable\n",
|
||||
"from langchain.pydantic_v1 import BaseModel\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.schema.messages import SystemMessage\n",
|
||||
"from langchain.schema.language_model import BaseLanguageModel\n",
|
||||
"\n",
|
||||
"_EXTRACTION_TEMPLATE = \"\"\"Extract and save the relevant entities mentioned \\\n",
|
||||
"in the following passage together with their properties.\n",
|
||||
"\n",
|
||||
"If a property is not present and is not required in the function parameters, do not include it in the output.\"\"\" # noqa: E501\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def create_extraction_chain_pydantic(\n",
|
||||
" pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],\n",
|
||||
" llm: BaseLanguageModel,\n",
|
||||
" system_message: str = _EXTRACTION_TEMPLATE,\n",
|
||||
") -> Runnable:\n",
|
||||
" if not isinstance(pydantic_schemas, list):\n",
|
||||
" pydantic_schemas = [pydantic_schemas]\n",
|
||||
" prompt = ChatPromptTemplate.from_messages([\n",
|
||||
" (\"system\", system_message),\n",
|
||||
" (\"user\", \"{input}\")\n",
|
||||
" ])\n",
|
||||
" tools = [convert_pydantic_to_openai_tool(p) for p in pydantic_schemas]\n",
|
||||
" model = llm.bind(tools=tools)\n",
|
||||
" chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)\n",
|
||||
" return chain\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2eac6b68",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -5,8 +5,8 @@
|
||||
"id": "052dfe58",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How (and why) to use the fake LLM\n",
|
||||
"We expose a fake LLM class that can be used for testing. This allows you to mock out calls to the LLM and simulate what would happen if the LLM responded in a certain way.\n",
|
||||
"# Fake LLM\n",
|
||||
"LangChain provides a fake LLM class that can be used for testing. This allows you to mock out calls to the LLM and simulate what would happen if the LLM responded in a certain way.\n",
|
||||
"\n",
|
||||
"In this notebook we go over how to use this.\n",
|
||||
"\n",
|
||||
@@ -30,9 +30,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import load_tools\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.agents import AgentType"
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -52,10 +50,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"responses=[\n",
|
||||
" \"Action: Python REPL\\nAction Input: print(2 + 2)\",\n",
|
||||
" \"Final Answer: 4\"\n",
|
||||
"]\n",
|
||||
"responses = [\"Action: Python REPL\\nAction Input: print(2 + 2)\", \"Final Answer: 4\"]\n",
|
||||
"llm = FakeListLLM(responses=responses)"
|
||||
]
|
||||
},
|
||||
@@ -66,7 +61,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -131,7 +128,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
493
cookbook/forward_looking_retrieval_augmented_generation.ipynb
Normal file
@@ -0,0 +1,493 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f0b9afa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Retrieve as you generate with FLARE\n",
|
||||
"\n",
|
||||
"This notebook is an implementation of Forward-Looking Active REtrieval augmented generation (FLARE).\n",
|
||||
"\n",
|
||||
"Please see the original repo [here](https://github.com/jzbjyb/FLARE/tree/main).\n",
|
||||
"\n",
|
||||
"The basic idea is:\n",
|
||||
"\n",
|
||||
"- Start answering a question\n",
|
||||
"- If you start generating tokens the model is uncertain about, look up relevant documents\n",
|
||||
"- Use those documents to continue generating\n",
|
||||
"- Repeat until finished\n",
|
||||
"\n",
|
||||
"There is a lot of cool detail in how the lookup of relevant documents is done.\n",
|
||||
"Basically, the tokens that model is uncertain about are highlighted, and then an LLM is called to generate a question that would lead to that answer. For example, if the generated text is `Joe Biden went to Harvard`, and the tokens the model was uncertain about was `Harvard`, then a good generated question would be `where did Joe Biden go to college`. This generated question is then used in a retrieval step to fetch relevant documents.\n",
|
||||
"\n",
|
||||
"In order to set up this chain, we will need three things:\n",
|
||||
"\n",
|
||||
"- An LLM to generate the answer\n",
|
||||
"- An LLM to generate hypothetical questions to use in retrieval\n",
|
||||
"- A retriever to use to look up answers for\n",
|
||||
"\n",
|
||||
"The LLM that we use to generate the answer needs to return logprobs so we can identify uncertain tokens. For that reason, we HIGHLY recommend that you use the OpenAI wrapper (NB: not the ChatOpenAI wrapper, as that does not return logprobs).\n",
|
||||
"\n",
|
||||
"The LLM we use to generate hypothetical questions to use in retrieval can be anything. In this notebook we will use ChatOpenAI because it is fast and cheap.\n",
|
||||
"\n",
|
||||
"The retriever can be anything. In this notebook we will use [SERPER](https://serper.dev/) search engine, because it is cheap.\n",
|
||||
"\n",
|
||||
"Other important parameters to understand:\n",
|
||||
"\n",
|
||||
"- `max_generation_len`: The maximum number of tokens to generate before stopping to check if any are uncertain\n",
|
||||
"- `min_prob`: Any tokens generated with probability below this will be considered uncertain"
|
||||
]
|
||||
},
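To make "uncertain tokens" concrete, here is a minimal, self-contained sketch of how low-probability spans can be pulled out of a completion's logprobs using the same `min_prob` threshold described above. This is only an illustration, not how `FlareChain` does it internally, and the token and logprob values are made up:

```python
import math
from typing import List


def low_confidence_spans(
    tokens: List[str], logprobs: List[float], min_prob: float = 0.3
) -> List[str]:
    """Group consecutive tokens whose generation probability falls below
    min_prob -- the spans FLARE would turn into retrieval questions."""
    spans, current = [], []
    for token, logprob in zip(tokens, logprobs):
        if math.exp(logprob) < min_prob:
            current.append(token)
        elif current:
            spans.append("".join(current).strip())
            current = []
    if current:
        spans.append("".join(current).strip())
    return spans


# Hypothetical completion: the model was confident everywhere except "Harvard".
tokens = ["Joe", " Biden", " went", " to", " Harvard"]
logprobs = [-0.1, -0.05, -0.2, -0.1, -2.5]  # exp(-2.5) ~ 0.08 < 0.3
print(low_confidence_spans(tokens, logprobs))  # ['Harvard']
```

Each such span is then handed to the question-generating LLM, and the generated question drives the retrieval step.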
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a7e4b63d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "042bb161",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"SERPER_API_KEY\"] = \"\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a7888f4a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, List\n",
|
||||
"\n",
|
||||
"from langchain.callbacks.manager import (\n",
|
||||
" AsyncCallbackManagerForRetrieverRun,\n",
|
||||
" CallbackManagerForRetrieverRun,\n",
|
||||
")\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.schema import BaseRetriever, Document\n",
|
||||
"from langchain.utilities import GoogleSerperAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5f552dce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "59c7d875",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SerperSearchRetriever(BaseRetriever):\n",
|
||||
" search: GoogleSerperAPIWrapper = None\n",
|
||||
"\n",
|
||||
" def _get_relevant_documents(\n",
|
||||
" self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any\n",
|
||||
" ) -> List[Document]:\n",
|
||||
" return [Document(page_content=self.search.run(query))]\n",
|
||||
"\n",
|
||||
" async def _aget_relevant_documents(\n",
|
||||
" self,\n",
|
||||
" query: str,\n",
|
||||
" *,\n",
|
||||
" run_manager: AsyncCallbackManagerForRetrieverRun,\n",
|
||||
" **kwargs: Any,\n",
|
||||
" ) -> List[Document]:\n",
|
||||
" raise NotImplementedError()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retriever = SerperSearchRetriever(search=GoogleSerperAPIWrapper())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "92478194",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## FLARE Chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "577e7c2c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# We set this so we can see what exactly is going on\n",
|
||||
"from langchain.globals import set_verbose\n",
|
||||
"\n",
|
||||
"set_verbose(True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "300d783e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import FlareChain\n",
|
||||
"\n",
|
||||
"flare = FlareChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0),\n",
|
||||
" retriever=retriever,\n",
|
||||
" max_generation_len=164,\n",
|
||||
" min_prob=0.3,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "1f3d5e90",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"explain in great detail the difference between the langchain framework and baby agi\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "4b1bfa8c",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new FlareChain chain...\u001b[0m\n",
|
||||
"\u001b[36;1m\u001b[1;3mCurrent Response: \u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: \n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> RESPONSE: \u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new QuestionGeneratorChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" decentralized platform for natural language processing\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" uses a blockchain\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" distributed ledger to\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" process data, allowing for secure and transparent data sharing.\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" set of tools\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" help developers create\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" create an AI system\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" NLP applications\" is:\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3mGenerated Questions: ['What is the Langchain Framework?', 'What technology does the Langchain Framework use to store and process data for secure and transparent data sharing?', 'What technology does the Langchain Framework use to store and process data?', 'What does the Langchain Framework use a blockchain-based distributed ledger for?', 'What does the Langchain Framework provide in addition to a decentralized platform for natural language processing applications?', 'What set of tools and services does the Langchain Framework provide?', 'What is the purpose of Baby AGI?', 'What type of applications is the Langchain Framework designed for?']\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new _OpenAIResponseChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: LangChain: Software. LangChain is a software development framework designed to simplify the creation of applications using large language models. LangChain Initial release date: October 2022. LangChain Programming languages: Python and JavaScript. LangChain Developer(s): Harrison Chase. LangChain License: MIT License. LangChain is a framework for developing applications powered by language models. We believe that the most powerful and differentiated applications will not only ... Type: Software framework. At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. LangChain is a powerful tool that can be used to work with Large Language Models (LLMs). LLMs are very general in nature, which means that while they can ... LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. LangChain is a software development framework designed to simplify the creation of applications using large language models (LLMs). Written in: Python and JavaScript. Initial release: October 2022. LangChain - The A.I-native developer toolkit We started LangChain with the intent to build a modular and flexible framework for developing A.I- ... LangChain explained in 3 minutes - LangChain is a ... Duration: 3:03. Posted: Apr 13, 2023. LangChain is a framework built to help you build LLM-powered applications more easily by providing you with the following:. LangChain is a framework that enables quick and easy development of applications that make use of Large Language Models, for example, GPT-3. LangChain is a powerful open-source framework for developing applications powered by language models. It connects to the AI models you want to ...\n",
|
||||
"\n",
|
||||
"LangChain is a framework for including AI from large language models inside data pipelines and applications. This tutorial provides an overview of what you ... Missing: secure | Must include:secure. Blockchain is the best way to secure the data of the shared community. Utilizing the capabilities of the blockchain nobody can read or interfere ... This modern technology consists of a chain of blocks that allows to securely store all committed transactions using shared and distributed ... A Blockchain network is used in the healthcare system to preserve and exchange patient data through hospitals, diagnostic laboratories, pharmacy firms, and ... In this article, I will walk you through the process of using the LangChain.js library with Google Cloud Functions, helping you leverage the ... LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. Missing: transparent | Must include:transparent. This technology keeps a distributed ledger on each blockchain node, making it more secure and transparent. The blockchain network can operate smart ... blockchain technology can offer a highly secured health data ledger to ... framework can be employed to store encrypted healthcare data in a ... In a simplified way, Blockchain is a data structure that stores transactions in an ordered way and linked to the previous block, serving as a ... Blockchain technology is a decentralized, distributed ledger that stores the record of ownership of digital assets. Missing: Langchain | Must include:Langchain.\n",
|
||||
"\n",
|
||||
"LangChain is a framework for including AI from large language models inside data pipelines and applications. This tutorial provides an overview of what you ... LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. This documentation covers the steps to integrate Pinecone, a high-performance vector database, with LangChain, a framework for building applications powered ... The ability to connect to any model, ingest any custom database, and build upon a framework that can take action provides numerous use cases for ... With LangChain, developers can use a framework that abstracts the core building blocks of LLM applications. LangChain empowers developers to ... Build a question-answering tool based on financial data with LangChain & Deep Lake's unified & streamable data store. Browse applications built on LangChain technology. Explore PoC and MVP applications created by our community and discover innovative use cases for LangChain ... LangChain is a great framework that can be used for developing applications powered by LLMs. When you intend to enhance your application ... In this blog, we'll introduce you to LangChain and Ray Serve and how to use them to build a search engine using LLM embeddings and a vector ... The LinkChain Framework simplifies embedding creation and storage using Pinecone and Chroma, with code that loads files, splits documents, and creates embedding ... Missing: technology | Must include:technology.\n",
|
||||
"\n",
|
||||
"Blockchain is one type of a distributed ledger. Distributed ledgers use independent computers (referred to as nodes) to record, share and ... Missing: Langchain | Must include:Langchain. Blockchain is used in distributed storage software where huge data is broken down into chunks. This is available in encrypted data across a ... People sometimes use the terms 'Blockchain' and 'Distributed Ledger' interchangeably. This post aims to analyze the features of each. A distributed ledger ... Missing: Framework | Must include:Framework. Think of a “distributed ledger” that uses cryptography to allow each participant in the transaction to add to the ledger in a secure way without ... In this paper, we provide an overview of the history of trade settlement and discuss this nascent technology that may now transform traditional ... Missing: Langchain | Must include:Langchain. LangChain is a blockchain-based language education platform that aims to revolutionize the way people learn languages. Missing: Framework | Must include:Framework. It uses the distributed ledger technology framework and Smart contract engine for building scalable Business Blockchain applications. The fabric ... It looks at the assets the use case is handling, the different parties conducting transactions, and the smart contract, distributed ... Are you curious to know how Blockchain and Distributed ... Duration: 44:31. Posted: May 4, 2021. A blockchain is a distributed and immutable ledger to transfer ownership, record transactions, track assets, and ensure transparency, security, trust and value ... Missing: Langchain | Must include:Langchain.\n",
|
||||
"\n",
|
||||
"LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. Missing: decentralized | Must include:decentralized. LangChain, created by Harrison Chase, is a Python library that provides out-of-the-box support to build NLP applications using LLMs. Missing: decentralized | Must include:decentralized. LangChain provides a standard interface for chains, enabling developers to create sequences of calls that go beyond a single LLM call. Chains ... Missing: decentralized platform natural. LangChain is a powerful framework that simplifies the process of building advanced language model applications. Missing: platform | Must include:platform. Are your language models ignoring previous instructions ... Duration: 32:23. Posted: Feb 21, 2023. LangChain is a framework that enables quick and easy development of applications ... Prompting is the new way of programming NLP models. Missing: decentralized platform. It then uses natural language processing and machine learning algorithms to search ... Summarization is handled via cohere, QnA is handled via langchain, ... LangChain is a framework for developing applications powered by language models. ... There are several main modules that LangChain provides support for. Missing: decentralized platform. In the healthcare-chain system, blockchain provides an appreciated secure ... The entire process of adding new and previous block data is performed based on ... ChatGPT is a large language model developed by OpenAI, ... tool for a wide range of applications, including natural language processing, ...\n",
|
||||
"\n",
|
||||
"LangChain is a powerful tool that can be used to work with Large Language ... If an API key has been provided, create an OpenAI language model instance At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. A tutorial of the six core modules of the LangChain Python package covering models, prompts, chains, agents, indexes, and memory with OpenAI ... LangChain's collection of tools refers to a set of tools provided by the LangChain framework for developing applications powered by language models. LangChain is a framework for developing applications powered by language models. We believe that the most powerful and differentiated applications will not only ... LangChain is an open-source library that provides developers with the tools to build applications powered by large language models (LLMs). LangChain is a framework for including AI from large language models inside data pipelines and applications. This tutorial provides an overview of what you ... Plan-and-Execute Agents · Feature Stores and LLMs · Structured Tools · Auto-Evaluator Opportunities · Callbacks Improvements · Unleashing the power ... Tool: A function that performs a specific duty. This can be things like: Google Search, Database lookup, Python REPL, other chains. · LLM: The language model ... LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.\n",
|
||||
"\n",
|
||||
"Baby AGI has the ability to complete tasks, generate new tasks based on previous results, and prioritize tasks in real-time. This system is exploring and demonstrating to us the potential of large language models, such as GPT and how it can autonomously perform tasks. Apr 17, 2023\n",
|
||||
"\n",
|
||||
"At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. The core idea of the library is that we can “chain” together different components to create more advanced use cases around LLMs.\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> RESPONSE: \u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' LangChain is a framework for developing applications powered by language models. It provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications. On the other hand, Baby AGI is an AI system that is exploring and demonstrating the potential of large language models, such as GPT, and how it can autonomously perform tasks. Baby AGI has the ability to complete tasks, generate new tasks based on previous results, and prioritize tasks in real-time. '"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"flare.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "7bed8944",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nThe Langchain framework and Baby AGI are both artificial intelligence (AI) frameworks that are used to create intelligent agents. The Langchain framework is a supervised learning system that is based on the concept of “language chains”. It uses a set of rules to map natural language inputs to specific outputs. It is a general-purpose AI framework and can be used to build applications such as natural language processing (NLP), chatbots, and more.\\n\\nBaby AGI, on the other hand, is an unsupervised learning system that uses neural networks and reinforcement learning to learn from its environment. It is used to create intelligent agents that can adapt to changing environments. It is a more advanced AI system and can be used to build more complex applications such as game playing, robotic vision, and more.\\n\\nThe main difference between the two is that the Langchain framework uses supervised learning while Baby AGI uses unsupervised learning. The Langchain framework is a general-purpose AI framework that can be used for various applications, while Baby AGI is a more advanced AI system that can be used to create more complex applications.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI()\n",
|
||||
"llm(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "8fb76286",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new FlareChain chain...\u001b[0m\n",
|
||||
"\u001b[36;1m\u001b[1;3mCurrent Response: \u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: \n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> RESPONSE: \u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new QuestionGeneratorChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"\n",
|
||||
"Langchain and Bitcoin have very different origin stories. Bitcoin was created by the mysterious Satoshi Nakamoto in 2008 as a decentralized digital currency. Langchain, on the other hand, was created in 2020 by a team of developers as a platform for creating and managing decentralized language learning applications. \n",
|
||||
"\n",
|
||||
"FINISHED\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" very different origin\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"\n",
|
||||
"Langchain and Bitcoin have very different origin stories. Bitcoin was created by the mysterious Satoshi Nakamoto in 2008 as a decentralized digital currency. Langchain, on the other hand, was created in 2020 by a team of developers as a platform for creating and managing decentralized language learning applications. \n",
|
||||
"\n",
|
||||
"FINISHED\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" 2020 by a\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"\n",
|
||||
"Langchain and Bitcoin have very different origin stories. Bitcoin was created by the mysterious Satoshi Nakamoto in 2008 as a decentralized digital currency. Langchain, on the other hand, was created in 2020 by a team of developers as a platform for creating and managing decentralized language learning applications. \n",
|
||||
"\n",
|
||||
"FINISHED\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" developers as a platform for creating and managing decentralized language learning applications.\" is:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3mGenerated Questions: ['How would you describe the origin stories of Langchain and Bitcoin in terms of their similarities or differences?', 'When was Langchain created and by whom?', 'What was the purpose of creating Langchain?']\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new _OpenAIResponseChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: Bitcoin and Ethereum have many similarities but different long-term visions and limitations. Ethereum changed from proof of work to proof of ... Bitcoin will be around for many years and examining its white paper origins is a great exercise in understanding why. Satoshi Nakamoto's blueprint describes ... Bitcoin is a new currency that was created in 2009 by an unknown person using the alias Satoshi Nakamoto. Transactions are made with no middle men – meaning, no ... Missing: Langchain | Must include:Langchain. By comparison, Bitcoin transaction speeds are tremendously lower. ... learn about its history and its role in the emergence of the Bitcoin ... LangChain is a powerful framework that simplifies the process of ... tasks like document retrieval, clustering, and similarity comparisons. Key terms: Bitcoin System, Blockchain Technology, ... Furthermore, the research paper will discuss and compare the five payment. Blockchain first appeared in Nakamoto's Bitcoin white paper that describes a new decentralized cryptocurrency [1]. Bitcoin takes the blockchain technology ... Missing: stories | Must include:stories. A score of 0 means there were not enough data for this term. Google trends was accessed on 5 November 2018 with searches for bitcoin, euro, gold ... Contracts, transactions, and records of them provide critical structure in our economic system, but they haven't kept up with the world's digital ... Missing: Langchain | Must include:Langchain. Of course, traders try to make a profit on their portfolio in this way.The difference between investing and trading is the regularity with which ...\n",
|
||||
"\n",
|
||||
"After all these giant leaps forward in the LLM space, OpenAI released ChatGPT — thrusting LLMs into the spotlight. LangChain appeared around the same time. Its creator, Harrison Chase, made the first commit in late October 2022. Leaving a short couple of months of development before getting caught in the LLM wave.\n",
|
||||
"\n",
|
||||
"At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. The core idea of the library is that we can “chain” together different components to create more advanced use cases around LLMs.\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> RESPONSE: \u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' The origin stories of LangChain and Bitcoin are quite different. Bitcoin was created in 2009 by an unknown person using the alias Satoshi Nakamoto. LangChain was created in late October 2022 by Harrison Chase. Bitcoin is a decentralized cryptocurrency, while LangChain is a framework built around LLMs. '"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"flare.run(\"how are the origin stories of langchain and bitcoin similar or different?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fbadd022",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,994 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9732067-71c7-46f7-ad09-381b3bf21a27",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Generative Agents in LangChain\n",
|
||||
"\n",
|
||||
"This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et. al.\n",
|
||||
"\n",
|
||||
"In it, we leverage a time-weighted Memory object backed by a LangChain Retriever."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "53f81c37-db45-4fdc-843c-aa8fd2a9e99d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Use termcolor to make it easy to colorize the outputs.\n",
|
||||
"!pip install termcolor > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "3128fc21",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.ERROR)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8851c370-b395-4b80-a79d-486a38ffc244",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import datetime, timedelta\n",
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.docstore import InMemoryDocstore\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.retrievers import TimeWeightedVectorStoreRetriever\n",
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from termcolor import colored"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "81824e76",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"USER_NAME = \"Person A\" # The name you want to use when interviewing the agent.\n",
|
||||
"LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3da1649-d88f-4973-b655-7042975cde7e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generative Agent Memory Components\n",
|
||||
"\n",
|
||||
"This tutorial highlights the memory of generative agents and its impact on their behavior. The memory varies from standard LangChain Chat memory in two aspects:\n",
|
||||
"\n",
|
||||
"1. **Memory Formation**\n",
|
||||
"\n",
|
||||
" Generative Agents have extended memories, stored in a single stream:\n",
|
||||
" 1. Observations - from dialogues or interactions with the virtual world, about self or others\n",
|
||||
" 2. Reflections - resurfaced and summarized core memories\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"2. **Memory Recall**\n",
|
||||
"\n",
|
||||
" Memories are retrieved using a weighted sum of salience, recency, and importance.\n",
|
||||
"\n",
|
||||
"You can review the definitions of the `GenerativeAgent` and `GenerativeAgentMemory` in the [reference documentation](\"https://api.python.langchain.com/en/latest/modules/experimental.html\") for the following imports, focusing on `add_memory` and `summarize_related_memories` methods."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "043e5203-6a41-431c-9efa-3e1743d7d25a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.generative_agents import (\n",
|
||||
" GenerativeAgent,\n",
|
||||
" GenerativeAgentMemory,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "361bd49e",
|
||||
"metadata": {
|
||||
"jp-MarkdownHeadingCollapsed": true,
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Memory Lifecycle\n",
|
||||
"\n",
|
||||
"Summarizing the key methods in the above: `add_memory` and `summarize_related_memories`.\n",
|
||||
"\n",
|
||||
"When an agent makes an observation, it stores the memory:\n",
|
||||
" \n",
|
||||
"1. Language model scores the memory's importance (1 for mundane, 10 for poignant)\n",
|
||||
"2. Observation and importance are stored within a document by TimeWeightedVectorStoreRetriever, with a `last_accessed_time`.\n",
|
||||
"\n",
|
||||
"When an agent responds to an observation:\n",
|
||||
"\n",
|
||||
"1. Generates query(s) for retriever, which fetches documents based on salience, recency, and importance.\n",
|
||||
"2. Summarizes the retrieved information\n",
|
||||
"3. Updates the `last_accessed_time` for the used documents.\n"
|
||||
]
|
||||
},
|
||||
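As a concrete illustration of the recall step above, here is a minimal sketch of how a time-weighted retrieval score can combine recency, importance, and relevance. This is an approximation for intuition, not the library's internals: the exponential decay, the simple summation, and the `decay_rate` default are all assumptions made for this sketch.

```python
from datetime import datetime, timedelta


def combined_score(
    relevance: float,  # similarity score in [0, 1]
    importance: float,  # LLM-assigned importance, rescaled to [0, 1]
    last_accessed: datetime,
    decay_rate: float = 0.01,  # assumed default, for illustration only
) -> float:
    """Sum a recency term (exponential decay per hour) with importance and relevance."""
    hours_passed = (datetime.now() - last_accessed).total_seconds() / 3600
    recency = (1.0 - decay_rate) ** hours_passed
    return recency + importance + relevance


# A memory last touched an hour ago, with moderate importance and relevance:
print(combined_score(relevance=0.6, importance=0.4,
                     last_accessed=datetime.now() - timedelta(hours=1)))
```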
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2fa3ca02",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a Generative Character\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Now that we've walked through the definition, we will create two characters named \"Tommie\" and \"Eve\"."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ee9c1a1d-c311-4f1c-8131-75fccd9025b1",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import math\n",
|
||||
"\n",
|
||||
"import faiss\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def relevance_score_fn(score: float) -> float:\n",
|
||||
" \"\"\"Return a similarity score on a scale [0, 1].\"\"\"\n",
|
||||
" # This will differ depending on a few things:\n",
|
||||
" # - the distance / similarity metric used by the VectorStore\n",
|
||||
" # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)\n",
|
||||
" # This function converts the euclidean norm of normalized embeddings\n",
|
||||
" # (0 is most similar, sqrt(2) most dissimilar)\n",
|
||||
" # to a similarity function (0 to 1)\n",
|
||||
" return 1.0 - score / math.sqrt(2)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def create_new_memory_retriever():\n",
|
||||
" \"\"\"Create a new vector store retriever unique to the agent.\"\"\"\n",
|
||||
" # Define your embedding model\n",
|
||||
" embeddings_model = OpenAIEmbeddings()\n",
|
||||
" # Initialize the vectorstore as empty\n",
|
||||
" embedding_size = 1536\n",
|
||||
" index = faiss.IndexFlatL2(embedding_size)\n",
|
||||
" vectorstore = FAISS(\n",
|
||||
" embeddings_model.embed_query,\n",
|
||||
" index,\n",
|
||||
" InMemoryDocstore({}),\n",
|
||||
" {},\n",
|
||||
" relevance_score_fn=relevance_score_fn,\n",
|
||||
" )\n",
|
||||
" return TimeWeightedVectorStoreRetriever(\n",
|
||||
" vectorstore=vectorstore, other_score_keys=[\"importance\"], k=15\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
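A quick sanity check on the conversion above, assuming unit-norm embeddings (as OpenAI's are) and that the cell above has been run: identical vectors are at Euclidean distance 0 and should score 1.0, while orthogonal unit vectors sit at distance √2 and should score 0.

```python
import math

assert relevance_score_fn(0.0) == 1.0  # identical vectors -> maximum similarity
assert abs(relevance_score_fn(math.sqrt(2))) < 1e-9  # orthogonal unit vectors -> ~0
```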
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7884f9dd-c597-4c27-8c77-1402c71bc2f8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tommies_memory = GenerativeAgentMemory(\n",
|
||||
" llm=LLM,\n",
|
||||
" memory_retriever=create_new_memory_retriever(),\n",
|
||||
" verbose=False,\n",
|
||||
" reflection_threshold=8, # we will give this a relatively low number to show how reflection works\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"tommie = GenerativeAgent(\n",
|
||||
" name=\"Tommie\",\n",
|
||||
" age=25,\n",
|
||||
" traits=\"anxious, likes design, talkative\", # You can add more persistent traits here\n",
|
||||
" status=\"looking for a job\", # When connected to a virtual world, we can have the characters update their status\n",
|
||||
" memory_retriever=create_new_memory_retriever(),\n",
|
||||
" llm=LLM,\n",
|
||||
" memory=tommies_memory,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c524d529",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"No information about Tommie's core characteristics is provided in the given statements.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# The current \"Summary\" of a character can't be made because the agent hasn't made\n",
|
||||
"# any observations yet.\n",
|
||||
"print(tommie.get_summary())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "4be60979-d56e-4abf-a636-b34ffa8b7fba",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# We can add memories directly to the memory object\n",
|
||||
"tommie_observations = [\n",
|
||||
" \"Tommie remembers his dog, Bruno, from when he was a kid\",\n",
|
||||
" \"Tommie feels tired from driving so far\",\n",
|
||||
" \"Tommie sees the new home\",\n",
|
||||
" \"The new neighbors have a cat\",\n",
|
||||
" \"The road is noisy at night\",\n",
|
||||
" \"Tommie is hungry\",\n",
|
||||
" \"Tommie tries to get some rest.\",\n",
|
||||
"]\n",
|
||||
"for observation in tommie_observations:\n",
|
||||
" tommie.memory.add_memory(observation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "6992b48b-697f-4973-9560-142ef85357d7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"Tommie is a person who is observant of his surroundings, has a sentimental side, and experiences basic human needs such as hunger and the need for rest. He also tends to get tired easily and is affected by external factors such as noise from the road or a neighbor's pet.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.\n",
|
||||
"# We will see how this summary updates after more observations to create a more rich description.\n",
|
||||
"print(tommie.get_summary(force_refresh=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "40d39a32-838c-4a03-8b27-a52c76c402e7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Pre-Interview with Character\n",
|
||||
"\n",
|
||||
"Before sending our character on their way, let's ask them a few questions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "eaf125d8-f54c-4c5f-b6af-32789b1f7d3a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def interview_agent(agent: GenerativeAgent, message: str) -> str:\n",
|
||||
" \"\"\"Help the notebook user interact with the agent.\"\"\"\n",
|
||||
" new_message = f\"{USER_NAME} says {message}\"\n",
|
||||
" return agent.generate_dialogue_response(new_message)[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "54024d41-6e83-4914-91e5-73140e2dd9c8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"I really enjoy design and being creative. I\\'ve been working on some personal projects lately. What about you, Person A? What do you like to do?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"What do you like to do?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "71e2e8cc-921e-4816-82f1-66962b2c1055",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"Well, I\\'m actually looking for a job right now, so hopefully I can find some job postings online and start applying. How about you, Person A? What\\'s on your schedule for today?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"What are you looking forward to doing today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "a2521ffc-7050-4ac3-9a18-4cccfc798c31",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"Honestly, I\\'m feeling pretty anxious about finding a job. It\\'s been a bit of a struggle lately, but I\\'m trying to stay positive and keep searching. How about you, Person A? What worries you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"What are you most worried about today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e509c468-f7cd-4d72-9f3a-f4aba28b1eea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step through the day's observations."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "154dee3d-bfe0-4828-b963-ed7e885799b3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Let's have Tommie start going through a day in the life.\n",
|
||||
"observations = [\n",
|
||||
" \"Tommie wakes up to the sound of a noisy construction site outside his window.\",\n",
|
||||
" \"Tommie gets out of bed and heads to the kitchen to make himself some coffee.\",\n",
|
||||
" \"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\",\n",
|
||||
" \"Tommie finally finds the filters and makes himself a cup of coffee.\",\n",
|
||||
" \"The coffee tastes bitter, and Tommie regrets not buying a better brand.\",\n",
|
||||
" \"Tommie checks his email and sees that he has no job offers yet.\",\n",
|
||||
" \"Tommie spends some time updating his resume and cover letter.\",\n",
|
||||
" \"Tommie heads out to explore the city and look for job openings.\",\n",
|
||||
" \"Tommie sees a sign for a job fair and decides to attend.\",\n",
|
||||
" \"The line to get in is long, and Tommie has to wait for an hour.\",\n",
|
||||
" \"Tommie meets several potential employers at the job fair but doesn't receive any offers.\",\n",
|
||||
" \"Tommie leaves the job fair feeling disappointed.\",\n",
|
||||
" \"Tommie stops by a local diner to grab some lunch.\",\n",
|
||||
" \"The service is slow, and Tommie has to wait for 30 minutes to get his food.\",\n",
|
||||
" \"Tommie overhears a conversation at the next table about a job opening.\",\n",
|
||||
" \"Tommie asks the diners about the job opening and gets some information about the company.\",\n",
|
||||
" \"Tommie decides to apply for the job and sends his resume and cover letter.\",\n",
|
||||
" \"Tommie continues his search for job openings and drops off his resume at several local businesses.\",\n",
|
||||
" \"Tommie takes a break from his job search to go for a walk in a nearby park.\",\n",
|
||||
" \"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.\",\n",
|
||||
" \"Tommie sees a group of people playing frisbee and decides to join in.\",\n",
|
||||
" \"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\",\n",
|
||||
" \"Tommie goes back to his apartment to rest for a bit.\",\n",
|
||||
" \"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\",\n",
|
||||
" \"Tommie starts to feel frustrated with his job search.\",\n",
|
||||
" \"Tommie calls his best friend to vent about his struggles.\",\n",
|
||||
" \"Tommie's friend offers some words of encouragement and tells him to keep trying.\",\n",
|
||||
" \"Tommie feels slightly better after talking to his friend.\",\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "238be49c-edb3-4e26-a2b6-98777ba8de86",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[32mTommie wakes up to the sound of a noisy construction site outside his window.\u001b[0m Tommie groans and covers his head with a pillow, trying to block out the noise.\n",
|
||||
"\u001b[32mTommie gets out of bed and heads to the kitchen to make himself some coffee.\u001b[0m Tommie stretches his arms and yawns before starting to make the coffee.\n",
|
||||
"\u001b[32mTommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\u001b[0m Tommie sighs in frustration and continues searching through the boxes.\n",
|
||||
"\u001b[32mTommie finally finds the filters and makes himself a cup of coffee.\u001b[0m Tommie takes a deep breath and enjoys the aroma of the fresh coffee.\n",
|
||||
"\u001b[32mThe coffee tastes bitter, and Tommie regrets not buying a better brand.\u001b[0m Tommie grimaces and sets the coffee mug aside.\n",
|
||||
"\u001b[32mTommie checks his email and sees that he has no job offers yet.\u001b[0m Tommie sighs and closes his laptop, feeling discouraged.\n",
|
||||
"\u001b[32mTommie spends some time updating his resume and cover letter.\u001b[0m Tommie nods, feeling satisfied with his progress.\n",
|
||||
"\u001b[32mTommie heads out to explore the city and look for job openings.\u001b[0m Tommie feels a surge of excitement and anticipation as he steps out into the city.\n",
|
||||
"\u001b[32mTommie sees a sign for a job fair and decides to attend.\u001b[0m Tommie feels hopeful and excited about the possibility of finding job opportunities at the job fair.\n",
|
||||
"\u001b[32mThe line to get in is long, and Tommie has to wait for an hour.\u001b[0m Tommie taps his foot impatiently and checks his phone for the time.\n",
|
||||
"\u001b[32mTommie meets several potential employers at the job fair but doesn't receive any offers.\u001b[0m Tommie feels disappointed and discouraged, but he remains determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie leaves the job fair feeling disappointed.\u001b[0m Tommie feels disappointed and discouraged, but he remains determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie stops by a local diner to grab some lunch.\u001b[0m Tommie feels relieved to take a break and satisfy his hunger.\n",
|
||||
"\u001b[32mThe service is slow, and Tommie has to wait for 30 minutes to get his food.\u001b[0m Tommie feels frustrated and impatient due to the slow service.\n",
|
||||
"\u001b[32mTommie overhears a conversation at the next table about a job opening.\u001b[0m Tommie feels a surge of hope and excitement at the possibility of a job opportunity but decides not to interfere with the conversation at the next table.\n",
|
||||
"\u001b[32mTommie asks the diners about the job opening and gets some information about the company.\u001b[0m Tommie said \"Excuse me, I couldn't help but overhear your conversation about the job opening. Could you give me some more information about the company?\"\n",
|
||||
"\u001b[32mTommie decides to apply for the job and sends his resume and cover letter.\u001b[0m Tommie feels hopeful and proud of himself for taking action towards finding a job.\n",
|
||||
"\u001b[32mTommie continues his search for job openings and drops off his resume at several local businesses.\u001b[0m Tommie feels hopeful and determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie takes a break from his job search to go for a walk in a nearby park.\u001b[0m Tommie feels refreshed and rejuvenated after taking a break in the park.\n",
|
||||
"\u001b[32mA dog approaches and licks Tommie's feet, and he pets it for a few minutes.\u001b[0m Tommie feels happy and enjoys the brief interaction with the dog.\n",
|
||||
"****************************************\n",
|
||||
"\u001b[34mAfter 20 observations, Tommie's summary is:\n",
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"Tommie is determined and hopeful in his search for job opportunities, despite encountering setbacks and disappointments. He is also able to take breaks and care for his physical needs, such as getting rest and satisfying his hunger. Tommie is nostalgic towards his past, as shown by his memory of his childhood dog. Overall, Tommie is a hardworking and resilient individual who remains focused on his goals.\u001b[0m\n",
|
||||
"****************************************\n",
|
||||
"\u001b[32mTommie sees a group of people playing frisbee and decides to join in.\u001b[0m Do nothing.\n",
|
||||
"\u001b[32mTommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\u001b[0m Tommie feels pain and puts a hand to his nose to check for any injury.\n",
|
||||
"\u001b[32mTommie goes back to his apartment to rest for a bit.\u001b[0m Tommie feels relieved to take a break and rest for a bit.\n",
|
||||
"\u001b[32mA raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\u001b[0m Tommie feels annoyed and frustrated at the mess caused by the raccoon.\n",
|
||||
"\u001b[32mTommie starts to feel frustrated with his job search.\u001b[0m Tommie feels discouraged but remains determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie calls his best friend to vent about his struggles.\u001b[0m Tommie said \"Hey, can I talk to you for a bit? I'm feeling really frustrated with my job search.\"\n",
|
||||
"\u001b[32mTommie's friend offers some words of encouragement and tells him to keep trying.\u001b[0m Tommie said \"Thank you, I really appreciate your support and encouragement.\"\n",
|
||||
"\u001b[32mTommie feels slightly better after talking to his friend.\u001b[0m Tommie feels grateful for his friend's support.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve\n",
|
||||
"for i, observation in enumerate(observations):\n",
|
||||
" _, reaction = tommie.generate_reaction(observation)\n",
|
||||
" print(colored(observation, \"green\"), reaction)\n",
|
||||
" if ((i + 1) % 20) == 0:\n",
|
||||
" print(\"*\" * 40)\n",
|
||||
" print(\n",
|
||||
" colored(\n",
|
||||
" f\"After {i+1} observations, Tommie's summary is:\\n{tommie.get_summary(force_refresh=True)}\",\n",
|
||||
" \"blue\",\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" print(\"*\" * 40)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd62a275-7290-43ca-aa0f-504f3a706d09",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Interview after the day"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "6336ab5d-3074-4831-951f-c9e2cba5dfb5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"It\\'s been a bit of a rollercoaster, to be honest. I\\'ve had some setbacks in my job search, but I also had some good moments today, like sending out a few resumes and meeting some potential employers at a job fair. How about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"Tell me about how your day has been going\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "809ac906-69b7-4326-99ec-af638d32bb20",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"I really enjoy coffee, but sometimes I regret not buying a better brand. How about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"How do you feel about coffee?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "f733a431-19ea-421a-9101-ae2593a8c626",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"Oh, I had a dog named Bruno when I was a kid. He was a golden retriever and my best friend. I have so many fond memories of him.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"Tell me about your childhood dog!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c9261428-778a-4c0b-b725-bc9e91b71391",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Adding Multiple Characters\n",
|
||||
"\n",
|
||||
"Let's add a second character to have a conversation with Tommie. Feel free to configure different traits."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"id": "ec8bbe18-a021-419c-bf1f-23d34732cd99",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"eves_memory = GenerativeAgentMemory(\n",
|
||||
" llm=LLM,\n",
|
||||
" memory_retriever=create_new_memory_retriever(),\n",
|
||||
" verbose=False,\n",
|
||||
" reflection_threshold=5,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"eve = GenerativeAgent(\n",
|
||||
" name=\"Eve\",\n",
|
||||
" age=34,\n",
|
||||
" traits=\"curious, helpful\", # You can add more persistent traits here\n",
|
||||
" status=\"N/A\", # When connected to a virtual world, we can have the characters update their status\n",
|
||||
" llm=LLM,\n",
|
||||
" daily_summaries=[\n",
|
||||
" (\n",
|
||||
" \"Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie.\"\n",
|
||||
" )\n",
|
||||
" ],\n",
|
||||
" memory=eves_memory,\n",
|
||||
" verbose=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"id": "1e2745f5-e0da-4abd-98b4-830802ce6698",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"yesterday = (datetime.now() - timedelta(days=1)).strftime(\"%A %B %d\")\n",
|
||||
"eve_observations = [\n",
|
||||
" \"Eve wakes up and hear's the alarm\",\n",
|
||||
" \"Eve eats a boal of porridge\",\n",
|
||||
" \"Eve helps a coworker on a task\",\n",
|
||||
" \"Eve plays tennis with her friend Xu before going to work\",\n",
|
||||
" \"Eve overhears her colleague say something about Tommie being hard to work with\",\n",
|
||||
"]\n",
|
||||
"for observation in eve_observations:\n",
|
||||
" eve.memory.add_memory(observation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "de4726e3-4bb1-47da-8fd9-f317a036fe0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Eve (age: 34)\n",
|
||||
"Innate traits: curious, helpful\n",
|
||||
"Eve is a helpful and active person who enjoys sports and takes care of her physical health. She is attentive to her surroundings, including her colleagues, and has good time management skills.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(eve.get_summary())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "837524e9-7f7e-4e9f-b610-f454062f5915",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Pre-conversation interviews\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Let's \"Interview\" Eve before she speaks with Tommie."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "6cda916d-800c-47bc-a7f9-6a2f19187472",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"I\\'m feeling pretty good, thanks for asking! Just trying to stay productive and make the most of the day. How about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 50,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"How are you feeling about today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "448ae644-0a66-4eb2-a03a-319f36948b37",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"I don\\'t know much about Tommie, but I heard someone mention that they find them difficult to work with. Have you had any experiences working with Tommie?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"What do you know about Tommie?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 52,
|
||||
"id": "493fc5b8-8730-4ef8-9820-0f1769ce1691",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"That\\'s interesting. I don\\'t know much about Tommie\\'s work experience, but I would probably ask about his strengths and areas for improvement. What about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 52,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(\n",
|
||||
" eve,\n",
|
||||
" \"Tommie is looking to find a job. What are are some things you'd like to ask him?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"id": "4b46452a-6c54-4db2-9d87-18597f70fec8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"Sure, I can keep the conversation going and ask plenty of questions. I want to make sure Tommie feels comfortable and supported. Thanks for letting me know.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 53,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(\n",
|
||||
" eve,\n",
|
||||
" \"You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd780655-1d73-4fcb-a78d-79fd46a20636",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dialogue between Generative Agents\n",
|
||||
"\n",
|
||||
"Generative agents are much more complex when they interact with a virtual environment or with each other. Below, we run a simple conversation between Tommie and Eve."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 54,
|
||||
"id": "042ea271-4bf1-4247-9082-239a6fea43b8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:\n",
|
||||
" \"\"\"Runs a conversation between agents.\"\"\"\n",
|
||||
" _, observation = agents[1].generate_reaction(initial_observation)\n",
|
||||
" print(observation)\n",
|
||||
" turns = 0\n",
|
||||
" while True:\n",
|
||||
" break_dialogue = False\n",
|
||||
" for agent in agents:\n",
|
||||
" stay_in_dialogue, observation = agent.generate_dialogue_response(\n",
|
||||
" observation\n",
|
||||
" )\n",
|
||||
" print(observation)\n",
|
||||
" # observation = f\"{agent.name} said {reaction}\"\n",
|
||||
" if not stay_in_dialogue:\n",
|
||||
" break_dialogue = True\n",
|
||||
" if break_dialogue:\n",
|
||||
" break\n",
|
||||
" turns += 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"id": "d5462b14-218e-4d85-b035-df57ea8e0f80",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Eve said \"Sure, Tommie. I'd be happy to share about my experience. Where would you like me to start?\"\n",
|
||||
"Tommie said \"That's great, thank you! How about you start by telling me about your previous work experience?\"\n",
|
||||
"Eve said \"Sure, I'd be happy to share my previous work experience with you. I've worked in a few different industries, including marketing and event planning. What specific questions do you have for me?\"\n",
|
||||
"Tommie said \"That's great to hear. Can you tell me more about your experience in event planning? I've always been interested in that field.\"\n",
|
||||
"Eve said \"Sure, I'd be happy to share about my experience in event planning. I've worked on a variety of events, from corporate conferences to weddings. One of the biggest challenges I faced was managing multiple vendors and ensuring everything ran smoothly on the day of the event. What specific questions do you have?\"\n",
|
||||
"Tommie said \"That sounds like a lot of responsibility! Can you tell me more about how you handled the challenges that came up during those events?\"\n",
|
||||
"Eve said \"Sure, Tommie. I'd be happy to share with you how I handled those challenges. One approach that worked well for me was to stay organized and create a detailed timeline for the event. This helped me keep track of all the different tasks that needed to be done and when they needed to be completed. I also made sure to communicate clearly with all the vendors and team members involved in the event to ensure everyone was on the same page. Would you like me to go into more detail?\"\n",
|
||||
"Tommie said \"Thank you for sharing that with me, Eve. That sounds like a great approach to managing events. Can you tell me more about how you handled any unexpected issues that came up during the events?\"\n",
|
||||
"Eve said \"Of course, Tommie. One example of an unexpected issue I faced was when one of the vendors didn't show up on time. To handle this, I quickly contacted a backup vendor and was able to get everything back on track. It's always important to have a backup plan in case things don't go as planned. Do you have any other questions about event planning?\"\n",
|
||||
"Tommie said \"Thank you for sharing that with me, Eve. It's really helpful to hear how you handled unexpected issues like that. Can you give me an example of how you communicated with your team to ensure everyone was on the same page during an event?\"\n",
|
||||
"Eve said \"Sure, Tommie. One thing I did to ensure everyone was on the same page was to have regular check-ins and meetings with the team leading up to the event. This helped us address any issues or concerns early on and make sure everyone was clear on their roles and responsibilities. Have you ever had to manage a team for an event before?\"\n",
|
||||
"Tommie said \"That's a great idea, Eve. I haven't had the opportunity to manage a team for an event yet, but I'll definitely keep that in mind for the future. Thank you for sharing your experience with me.\"\n",
|
||||
"Eve said \"Thanks for the opportunity to share my experience, Tommie. It was great meeting with you today.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agents = [tommie, eve]\n",
|
||||
"run_conversation(\n",
|
||||
" agents,\n",
|
||||
" \"Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b28fe80-03dc-4399-961d-6e9ee1980216",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Let's interview our agents after their conversation\n",
|
||||
"\n",
|
||||
"Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memoreis."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"id": "c4d252f3-fcc1-474c-846e-a7605a6b4ce7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"Tommie is determined and hopeful in his job search, but can also feel discouraged and frustrated at times. He has a strong connection to his childhood dog, Bruno. Tommie seeks support from his friends when feeling overwhelmed and is grateful for their help. He also enjoys exploring his new city.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can see a current \"Summary\" of a character based on their own perception of self\n",
|
||||
"# has changed\n",
|
||||
"print(tommie.get_summary(force_refresh=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "c04db9a4",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Eve (age: 34)\n",
|
||||
"Innate traits: curious, helpful\n",
|
||||
"Eve is a helpful and friendly person who enjoys playing sports and staying productive. She is attentive and responsive to others' needs, actively listening and asking questions to understand their perspectives. Eve has experience in event planning and communication, and is willing to share her knowledge and expertise with others. She values teamwork and collaboration, and strives to create a comfortable and supportive environment for everyone.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(eve.get_summary(force_refresh=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "71762558-8fb6-44d7-8483-f5b47fb2a862",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"It was really helpful actually. Eve shared some great tips on managing events and handling unexpected issues. I feel like I learned a lot from her experience.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 58,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"How was your conversation with Eve?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "085af3d8-ac21-41ea-8f8b-055c56976a67",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"It was great, thanks for asking. Tommie was very receptive and had some great questions about event planning. How about you, have you had any interactions with Tommie?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 59,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"How was your conversation with Tommie?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"id": "5b439f3c-7849-4432-a697-2bcc85b89dae",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"It was great meeting with you, Tommie. If you have any more questions or need any help in the future, don\\'t hesitate to reach out to me. Have a great day!\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 60,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"What do you wish you would have said to Tommie?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
239
cookbook/gymnasium_agent_simulation.ipynb
Normal file
@@ -0,0 +1,239 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b089493",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Simulated Environment: Gymnasium\n",
|
||||
"\n",
|
||||
"For many applications of LLM agents, the environment is real (internet, database, REPL, etc). However, we can also define agents to interact in simulated environments like text-based games. This is an example of how to create a simple agent-environment interaction loop with [Gymnasium](https://github.com/Farama-Foundation/Gymnasium) (formerly [OpenAI Gym](https://github.com/openai/gym))."
|
||||
]
|
||||
},
|
||||
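Before wiring in an LLM, it may help to see the bare Gymnasium interaction loop that the rest of this notebook builds on. The sketch below uses a random policy; the agent defined next replaces `env.action_space.sample()` with a call to a chat model.

```python
import gymnasium as gym

env = gym.make("Blackjack-v1")
observation, info = env.reset()

while True:
    action = env.action_space.sample()  # random policy stands in for the LLM
    observation, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        break
env.close()
```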
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f36427cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install gymnasium"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f9bd38b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tenacity\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e222e811",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "870c24bc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class GymnasiumAgent:\n",
|
||||
" @classmethod\n",
|
||||
" def get_docs(cls, env):\n",
|
||||
" return env.unwrapped.__doc__\n",
|
||||
"\n",
|
||||
" def __init__(self, model, env):\n",
|
||||
" self.model = model\n",
|
||||
" self.env = env\n",
|
||||
" self.docs = self.get_docs(env)\n",
|
||||
"\n",
|
||||
" self.instructions = \"\"\"\n",
|
||||
"Your goal is to maximize your return, i.e. the sum of the rewards you receive.\n",
|
||||
"I will give you an observation, reward, terminiation flag, truncation flag, and the return so far, formatted as:\n",
|
||||
"\n",
|
||||
"Observation: <observation>\n",
|
||||
"Reward: <reward>\n",
|
||||
"Termination: <termination>\n",
|
||||
"Truncation: <truncation>\n",
|
||||
"Return: <sum_of_rewards>\n",
|
||||
"\n",
|
||||
"You will respond with an action, formatted as:\n",
|
||||
"\n",
|
||||
"Action: <action>\n",
|
||||
"\n",
|
||||
"where you replace <action> with your actual action.\n",
|
||||
"Do nothing else but return the action.\n",
|
||||
"\"\"\"\n",
|
||||
" self.action_parser = RegexParser(\n",
|
||||
" regex=r\"Action: (.*)\", output_keys=[\"action\"], default_output_key=\"action\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" self.message_history = []\n",
|
||||
" self.ret = 0\n",
|
||||
"\n",
|
||||
" def random_action(self):\n",
|
||||
" action = self.env.action_space.sample()\n",
|
||||
" return action\n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\n",
|
||||
" SystemMessage(content=self.docs),\n",
|
||||
" SystemMessage(content=self.instructions),\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" def observe(self, obs, rew=0, term=False, trunc=False, info=None):\n",
|
||||
" self.ret += rew\n",
|
||||
"\n",
|
||||
" obs_message = f\"\"\"\n",
|
||||
"Observation: {obs}\n",
|
||||
"Reward: {rew}\n",
|
||||
"Termination: {term}\n",
|
||||
"Truncation: {trunc}\n",
|
||||
"Return: {self.ret}\n",
|
||||
" \"\"\"\n",
|
||||
" self.message_history.append(HumanMessage(content=obs_message))\n",
|
||||
" return obs_message\n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
"\n",
|
||||
" def act(self):\n",
|
||||
" try:\n",
|
||||
" for attempt in tenacity.Retrying(\n",
|
||||
" stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(\n",
|
||||
" f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"\n",
|
||||
" ),\n",
|
||||
" ):\n",
|
||||
" with attempt:\n",
|
||||
" action = self._act()\n",
|
||||
" except tenacity.RetryError:\n",
|
||||
" action = self.random_action()\n",
|
||||
" return action"
|
||||
]
|
||||
},
|
||||
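The agent's `_act` method relies on `RegexParser` to extract the action from the model's reply. Here is a small standalone check of that parsing step; the reply string is made up for illustration.

```python
from langchain.output_parsers import RegexParser

parser = RegexParser(
    regex=r"Action: (.*)", output_keys=["action"], default_output_key="action"
)
# parse() maps the captured regex group onto the output key.
print(parser.parse("Action: 1"))  # {'action': '1'}
```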
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e76d22c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize the simulated environment and agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "9e902cfd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"env = gym.make(\"Blackjack-v1\")\n",
|
||||
"agent = GymnasiumAgent(model=ChatOpenAI(temperature=0.2), env=env)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e2c12b15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Main loop"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ad361210",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Observation: (15, 4, 0)\n",
|
||||
"Reward: 0\n",
|
||||
"Termination: False\n",
|
||||
"Truncation: False\n",
|
||||
"Return: 0\n",
|
||||
" \n",
|
||||
"Action: 1\n",
|
||||
"\n",
|
||||
"Observation: (25, 4, 0)\n",
|
||||
"Reward: -1.0\n",
|
||||
"Termination: True\n",
|
||||
"Truncation: False\n",
|
||||
"Return: -1.0\n",
|
||||
" \n",
|
||||
"break True False\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"observation, info = env.reset()\n",
|
||||
"agent.reset()\n",
|
||||
"\n",
|
||||
"obs_message = agent.observe(observation)\n",
|
||||
"print(obs_message)\n",
|
||||
"\n",
|
||||
"while True:\n",
|
||||
" action = agent.act()\n",
|
||||
" observation, reward, termination, truncation, info = env.step(action)\n",
|
||||
" obs_message = agent.observe(observation, reward, termination, truncation, info)\n",
|
||||
" print(f\"Action: {action}\")\n",
|
||||
" print(obs_message)\n",
|
||||
"\n",
|
||||
" if termination or truncation:\n",
|
||||
" print(\"break\", termination, truncation)\n",
|
||||
" break\n",
|
||||
"env.close()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "58a13e9c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
136
cookbook/hugginggpt.ipynb
Normal file
@@ -0,0 +1,136 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# HuggingGPT\n",
|
||||
"Implementation of [HuggingGPT](https://github.com/microsoft/JARVIS). HuggingGPT is a system to connect LLMs (ChatGPT) with ML community (Hugging Face).\n",
|
||||
"\n",
|
||||
"+ 🔥 Paper: https://arxiv.org/abs/2303.17580\n",
|
||||
"+ 🚀 Project: https://github.com/microsoft/JARVIS\n",
|
||||
"+ 🤗 Space: https://huggingface.co/spaces/microsoft/HuggingGPT"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up tools\n",
|
||||
"\n",
|
||||
"We set up the tools available from [Transformers Agent](https://huggingface.co/docs/transformers/transformers_agents#tools). It includes a library of tools supported by Transformers and some customized tools such as image generator, video generator, text downloader and other tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from transformers import load_tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"hf_tools = [\n",
|
||||
" load_tool(tool_name)\n",
|
||||
" for tool_name in [\n",
|
||||
" \"document-question-answering\",\n",
|
||||
" \"image-captioning\",\n",
|
||||
" \"image-question-answering\",\n",
|
||||
" \"image-segmentation\",\n",
|
||||
" \"speech-to-text\",\n",
|
||||
" \"summarization\",\n",
|
||||
" \"text-classification\",\n",
|
||||
" \"text-question-answering\",\n",
|
||||
" \"translation\",\n",
|
||||
" \"huggingface-tools/text-to-image\",\n",
|
||||
" \"huggingface-tools/text-to-video\",\n",
|
||||
" \"text-to-speech\",\n",
|
||||
" \"huggingface-tools/text-download\",\n",
|
||||
" \"huggingface-tools/image-transformation\",\n",
|
||||
" ]\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup model and HuggingGPT\n",
|
||||
"\n",
|
||||
"We create an instance of HuggingGPT and use ChatGPT as the controller to rule the above tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain_experimental.autonomous_agents import HuggingGPT\n",
|
||||
"\n",
|
||||
"# %env OPENAI_API_BASE=http://localhost:8000/v1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo\")\n",
|
||||
"agent = HuggingGPT(llm, hf_tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run an example\n",
|
||||
"\n",
|
||||
"Given a text, show a related image and video."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent.run(\"please show me a video and an image of 'a boy is running'\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.17"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
cookbook/human_input_chat_model.ipynb (new file, 210 lines)
@@ -0,0 +1,210 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Human input chat model\n",
|
||||
"\n",
|
||||
"Along with HumanInputLLM, LangChain also provides a pseudo chat model class that can be used for testing, debugging, or educational purposes. This allows you to mock out calls to the chat model and simulate how a human would respond if they received the messages.\n",
|
||||
"\n",
|
||||
"In this notebook, we go over how to use this.\n",
|
||||
"\n",
|
||||
"We start this with using the HumanInputChatModel in an agent."
|
||||
]
|
||||
},
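Before wiring it into an agent, here is a minimal sketch of calling the chat model directly (the message content is illustrative; only `HumanInputChatModel` comes from the notebook above):

```python
from langchain.chat_models.human import HumanInputChatModel
from langchain.schema import HumanMessage

chat = HumanInputChatModel()

# The model prints the incoming messages, then blocks and waits
# for you to type the "model" reply on stdin.
response = chat([HumanMessage(content="What is 2 + 2?")])
print(response.content)
```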
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models.human import HumanInputChatModel"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Since we will use the `WikipediaQueryRun` tool in this notebook, you might need to install the `wikipedia` package if you haven't done so already."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/mskim58/dev/research/chatbot/github/langchain/.venv/bin/python: No module named pip\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = load_tools([\"wikipedia\"])\n",
|
||||
"llm = HumanInputChatModel()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: system\n",
|
||||
"data:\n",
|
||||
" content: \"Answer the following questions as best you can. You have access to the following tools:\\n\\nWikipedia: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.\\n\\nThe way you use the tools is by specifying a json blob.\\nSpecifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\\n\\nThe only values that should be in the \\\"action\\\" field are: Wikipedia\\n\\nThe $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:\\n\\n```\\n{\\n \\\"action\\\": $TOOL_NAME,\\n \\\"action_input\\\": $INPUT\\n}\\n```\\n\\nALWAYS use the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction:\\n```\\n$JSON_BLOB\\n```\\nObservation: the result of the action\\n... (this Thought/Action/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nBegin! Reminder to always use the exact characters `Final Answer` when responding.\"\n",
|
||||
" additional_kwargs: {}\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: human\n",
|
||||
"data:\n",
|
||||
" content: 'What is Bocchi the Rock?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" '\n",
|
||||
" additional_kwargs: {}\n",
|
||||
" example: false\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Wikipedia\",\n",
|
||||
" \"action_input\": \"What is Bocchi the Rock?\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mPage: Bocchi the Rock!\n",
|
||||
"Summary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Botchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\n",
|
||||
"An anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\n",
|
||||
"\n",
|
||||
"Page: Hitori Bocchi no Marumaru Seikatsu\n",
|
||||
"Summary: Hitori Bocchi no Marumaru Seikatsu (Japanese: ひとりぼっちの○○生活, lit. \"Bocchi Hitori's ____ Life\" or \"The ____ Life of Being Alone\") is a Japanese yonkoma manga series written and illustrated by Katsuwo. It was serialized in ASCII Media Works' Comic Dengeki Daioh \"g\" magazine from September 2013 to April 2021. Eight tankōbon volumes have been released. An anime television series adaptation by C2C aired from April to June 2019.\n",
|
||||
"\n",
|
||||
"Page: Kessoku Band (album)\n",
|
||||
"Summary: Kessoku Band (Japanese: 結束バンド, Hepburn: Kessoku Bando) is the debut studio album by Kessoku Band, a fictional musical group from the anime television series Bocchi the Rock!, released digitally on December 25, 2022, and physically on CD on December 28 by Aniplex. Featuring vocals from voice actresses Yoshino Aoyama, Sayumi Suzushiro, Saku Mizuno, and Ikumi Hasegawa, the album consists of 14 tracks previously heard in the anime, including a cover of Asian Kung-Fu Generation's \"Rockn' Roll, Morning Light Falls on You\", as well as newly recorded songs; nine singles preceded the album's physical release. Commercially, Kessoku Band peaked at number one on the Billboard Japan Hot Albums Chart and Oricon Albums Chart, and was certified gold by the Recording Industry Association of Japan.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"Thought:\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: system\n",
|
||||
"data:\n",
|
||||
" content: \"Answer the following questions as best you can. You have access to the following tools:\\n\\nWikipedia: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.\\n\\nThe way you use the tools is by specifying a json blob.\\nSpecifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\\n\\nThe only values that should be in the \\\"action\\\" field are: Wikipedia\\n\\nThe $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:\\n\\n```\\n{\\n \\\"action\\\": $TOOL_NAME,\\n \\\"action_input\\\": $INPUT\\n}\\n```\\n\\nALWAYS use the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction:\\n```\\n$JSON_BLOB\\n```\\nObservation: the result of the action\\n... (this Thought/Action/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nBegin! Reminder to always use the exact characters `Final Answer` when responding.\"\n",
|
||||
" additional_kwargs: {}\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: human\n",
|
||||
"data:\n",
|
||||
" content: \"What is Bocchi the Rock?\\n\\nThis was your previous work (but I haven't seen any of it! I only see what you return as final answer):\\nAction:\\n```\\n{\\n \\\"action\\\": \\\"Wikipedia\\\",\\n \\\"action_input\\\": \\\"What is Bocchi the Rock?\\\"\\n}\\n```\\nObservation: Page: Bocchi the Rock!\\nSummary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Botchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\\nAn anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\\n\\nPage: Hitori Bocchi no Marumaru Seikatsu\\nSummary: Hitori Bocchi no Marumaru Seikatsu (Japanese: ひとりぼっちの○○生活, lit. \\\"Bocchi Hitori's ____ Life\\\" or \\\"The ____ Life of Being Alone\\\") is a Japanese yonkoma manga series written and illustrated by Katsuwo. It was serialized in ASCII Media Works' Comic Dengeki Daioh \\\"g\\\" magazine from September 2013 to April 2021. Eight tankōbon volumes have been released. An anime television series adaptation by C2C aired from April to June 2019.\\n\\nPage: Kessoku Band (album)\\nSummary: Kessoku Band (Japanese: 結束バンド, Hepburn: Kessoku Bando) is the debut studio album by Kessoku Band, a fictional musical group from the anime television series Bocchi the Rock!, released digitally on December 25, 2022, and physically on CD on December 28 by Aniplex. Featuring vocals from voice actresses Yoshino Aoyama, Sayumi Suzushiro, Saku Mizuno, and Ikumi Hasegawa, the album consists of 14 tracks previously heard in the anime, including a cover of Asian Kung-Fu Generation's \\\"Rockn' Roll, Morning Light Falls on You\\\", as well as newly recorded songs; nine singles preceded the album's physical release. Commercially, Kessoku Band peaked at number one on the Billboard Japan Hot Albums Chart and Oricon Albums Chart, and was certified gold by the Recording Industry Association of Japan.\\n\\n\\nThought:\"\n",
|
||||
" additional_kwargs: {}\n",
|
||||
" example: false\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[32;1m\u001b[1;3mThis finally works.\n",
|
||||
"Final Answer: Bocchi the Rock! is a four-panel manga series and anime television series. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What is Bocchi the Rock?',\n",
|
||||
" 'output': \"Bocchi the Rock! is a four-panel manga series and anime television series. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent(\"What is Bocchi the Rock?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,11 +1,10 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How (and why) to use the the human input LLM\n",
|
||||
"# Human input LLM\n",
|
||||
"\n",
|
||||
"Similar to the fake LLM, LangChain provides a pseudo LLM class that can be used for testing, debugging, or educational purposes. This allows you to mock out calls to the LLM and simulate how a human would respond if they received the prompts.\n",
|
||||
"\n",
|
||||
@@ -29,9 +28,23 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import load_tools\n",
|
||||
"from langchain.agents import initialize_agent\n",
|
||||
"from langchain.agents import AgentType"
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Since we will use the `WikipediaQueryRun` tool in this notebook, you might need to install the `wikipedia` package if you haven't done so already."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -41,7 +54,11 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = load_tools([\"wikipedia\"])\n",
|
||||
"llm = HumanInputLLM(prompt_func=lambda prompt: print(f\"\\n===PROMPT====\\n{prompt}\\n=====END OF PROMPT======\"))"
|
||||
"llm = HumanInputLLM(\n",
|
||||
" prompt_func=lambda prompt: print(\n",
|
||||
" f\"\\n===PROMPT====\\n{prompt}\\n=====END OF PROMPT======\"\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -50,7 +67,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -203,7 +222,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -219,7 +238,6 @@
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "ab4db1680e5f8d10489fb83454f4ec01729e3bd5bdb28eaf0a13b95ddb6ae5ea"
|
||||
cookbook/hypothetical_document_embeddings.ipynb (new file, 268 lines)
@@ -0,0 +1,268 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ccb74c9b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Improve document indexing with HyDE\n",
|
||||
"This notebook goes over how to use Hypothetical Document Embeddings (HyDE), as described in [this paper](https://arxiv.org/abs/2212.10496). \n",
|
||||
"\n",
|
||||
"At a high level, HyDE is an embedding technique that takes queries, generates a hypothetical answer, and then embeds that generated document and uses that as the final example. \n",
|
||||
"\n",
|
||||
"In order to use HyDE, we therefore need to provide a base embedding model, as well as an LLMChain that can be used to generate those documents. By default, the HyDE class comes with some default prompts to use (see the paper for more details on them), but we can also create our own."
|
||||
]
|
||||
},
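To make the mechanics concrete, here is a minimal conceptual sketch of HyDE (the `llm` and `embed` callables are placeholders, not the classes used below):

```python
def hyde_embed(query, llm, embed):
    """Embed a hypothetical answer instead of the raw query."""
    # 1. Ask the LLM for a document that plausibly answers the query.
    hypothetical_doc = llm(f"Write a passage that answers: {query}")
    # 2. Embed the generated document; this vector stands in for the query.
    return embed(hypothetical_doc)
```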
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "546e87ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c0ea895f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"base_embeddings = OpenAIEmbeddings()\n",
|
||||
"llm = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "33bd6905",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "50729989",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load with `web_search` prompt\n",
|
||||
"embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, \"web_search\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3aa573d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Now we can use it as any embedding class!\n",
|
||||
"result = embeddings.embed_query(\"Where is the Taj Mahal?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c7a0b556",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multiple generations\n",
|
||||
"We can also generate multiple documents and then combine the embeddings for those. By default, we combine those by taking the average. We can do this by changing the LLM we use to generate documents to return multiple things."
|
||||
]
|
||||
},
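The combination step itself is a plain element-wise average; a minimal numpy sketch of what happens under the hood (the vectors are stand-ins for real document embeddings):

```python
import numpy as np

# Embeddings of several generated hypothetical documents.
doc_embeddings = np.array(
    [
        [0.1, 0.3, 0.5],
        [0.2, 0.2, 0.6],
        [0.0, 0.4, 0.4],
    ]
)

# Default combination: element-wise mean across documents.
final_embedding = doc_embeddings.mean(axis=0)
print(final_embedding)  # [0.1 0.3 0.5]
```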
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "05da7060",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"multi_llm = OpenAI(n=4, best_of=4)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "9b1e12bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = HypotheticalDocumentEmbedder.from_llm(\n",
|
||||
" multi_llm, base_embeddings, \"web_search\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "a60cd343",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = embeddings.embed_query(\"Where is the Taj Mahal?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1da90437",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using our own prompts\n",
|
||||
"Besides using preconfigured prompts, we can also easily construct our own prompts and use those in the LLMChain that is generating the documents. This can be useful if we know the domain our queries will be in, as we can condition the prompt to generate text more similar to that.\n",
|
||||
"\n",
|
||||
"In the example below, let's condition it to generate text about a state of the union address (because we will use that in the next example)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "0b4a650f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt_template = \"\"\"Please answer the user's question about the most recent state of the union address\n",
|
||||
"Question: {question}\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"prompt = PromptTemplate(input_variables=[\"question\"], template=prompt_template)\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "7f7e2b86",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = HypotheticalDocumentEmbedder(\n",
|
||||
" llm_chain=llm_chain, base_embeddings=base_embeddings\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "6dd83424",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = embeddings.embed_query(\n",
|
||||
" \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "31388123",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using HyDE\n",
|
||||
"Now that we have HyDE, we can use it as we would any other embedding class! Here is using it to find similar passages in the state of the union example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "97719b29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"with open(\"../../state_of_the_union.txt\") as f:\n",
|
||||
" state_of_the_union = f.read()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_text(state_of_the_union)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "bfcfc039",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docsearch = Chroma.from_texts(texts, embeddings)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "632af7f2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b9e57b93",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
cookbook/learned_prompt_optimization.ipynb (new file, 848 lines; diff suppressed because one or more lines are too long)
@@ -4,13 +4,13 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# BashChain\n",
|
||||
"# Bash chain\n",
|
||||
"This notebook showcases using LLMs and a bash process to perform simple filesystem commands."
|
||||
]
|
||||
},
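Since the chain ultimately shells out through a bash process, here is a minimal sketch of that underlying primitive on its own (using the experimental module this notebook imports later; assumes a Unix-like shell):

```python
from langchain_experimental.llm_bash.bash import BashProcess

bash = BashProcess()
# Run a command directly, outside the chain.
print(bash.run(["echo 'Hello World'"]))  # Hello World
```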
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -37,14 +37,14 @@
|
||||
"'Hello World\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMBashChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain_experimental.llm_bash.base import LLMBashChain\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
@@ -65,12 +65,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"from langchain.chains.llm_bash.prompt import BashOutputParser\n",
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"\n",
|
||||
"_PROMPT_TEMPLATE = \"\"\"If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using this format:\n",
|
||||
"Question: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n",
|
||||
@@ -89,12 +89,16 @@
|
||||
"That is the format. Begin!\n",
|
||||
"Question: {question}\"\"\"\n",
|
||||
"\n",
|
||||
"PROMPT = PromptTemplate(input_variables=[\"question\"], template=_PROMPT_TEMPLATE, output_parser=BashOutputParser())"
|
||||
"PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
" template=_PROMPT_TEMPLATE,\n",
|
||||
" output_parser=BashOutputParser(),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -121,7 +125,7 @@
|
||||
"'Hello World\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -145,7 +149,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -162,29 +166,24 @@
|
||||
"cd ..\n",
|
||||
"```\u001b[0m\n",
|
||||
"Code: \u001b[33;1m\u001b[1;3m['ls', 'cd ..']\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3mapi.ipynb\t\t\tllm_summarization_checker.ipynb\n",
|
||||
"constitutional_chain.ipynb\tmoderation.ipynb\n",
|
||||
"llm_bash.ipynb\t\t\topenai_openapi.yaml\n",
|
||||
"llm_checker.ipynb\t\topenapi.ipynb\n",
|
||||
"llm_math.ipynb\t\t\tpal.ipynb\n",
|
||||
"llm_requests.ipynb\t\tsqlite.ipynb\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3mcpal.ipynb llm_bash.ipynb llm_symbolic_math.ipynb\n",
|
||||
"index.mdx llm_math.ipynb pal.ipynb\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'api.ipynb\\t\\t\\tllm_summarization_checker.ipynb\\r\\nconstitutional_chain.ipynb\\tmoderation.ipynb\\r\\nllm_bash.ipynb\\t\\t\\topenai_openapi.yaml\\r\\nllm_checker.ipynb\\t\\topenapi.ipynb\\r\\nllm_math.ipynb\\t\\t\\tpal.ipynb\\r\\nllm_requests.ipynb\\t\\tsqlite.ipynb'"
|
||||
"'cpal.ipynb llm_bash.ipynb llm_symbolic_math.ipynb\\r\\nindex.mdx llm_math.ipynb pal.ipynb'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.utilities.bash import BashProcess\n",
|
||||
"\n",
|
||||
"from langchain_experimental.llm_bash.bash import BashProcess\n",
|
||||
"\n",
|
||||
"persistent_process = BashProcess(persistent=True)\n",
|
||||
"bash_chain = LLMBashChain.from_llm(llm, bash_process=persistent_process, verbose=True)\n",
|
||||
@@ -196,7 +195,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -213,18 +212,19 @@
|
||||
"cd ..\n",
|
||||
"```\u001b[0m\n",
|
||||
"Code: \u001b[33;1m\u001b[1;3m['ls', 'cd ..']\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3mexamples\t\tgetting_started.ipynb\tindex_examples\n",
|
||||
"generic\t\t\thow_to_guides.rst\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m_category_.yml\tdata_generation.ipynb\t\t self_check\n",
|
||||
"agents\t\tgraph\n",
|
||||
"code_writing\tlearned_prompt_optimization.ipynb\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'examples\\t\\tgetting_started.ipynb\\tindex_examples\\r\\ngeneric\\t\\t\\thow_to_guides.rst'"
|
||||
"'_category_.yml\\tdata_generation.ipynb\\t\\t self_check\\r\\nagents\\t\\tgraph\\r\\ncode_writing\\tlearned_prompt_optimization.ipynb'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -233,13 +233,6 @@
|
||||
"# Run the same command again and see that the state is maintained between calls\n",
|
||||
"bash_chain.run(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -258,7 +251,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLMCheckerChain\n",
|
||||
"# Self-checking chain\n",
|
||||
"This notebook showcases how to use LLMCheckerChain."
|
||||
]
|
||||
},
|
||||
@@ -77,7 +77,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
cookbook/llm_math.ipynb (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e71e720f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Math chain\n",
|
||||
"\n",
|
||||
"This notebook showcases using LLMs and Python REPLs to do complex word math problems."
|
||||
]
|
||||
},
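As the verbose trace below shows, the chain's final step boils down to a numexpr evaluation; a minimal sketch of that underlying call, run directly:

```python
import numexpr

# The chain extracts the expression from the LLM's fenced text block
# and hands it to numexpr for numeric evaluation.
print(numexpr.evaluate("13 ** .3432"))  # 2.4116004626599237
```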
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "44e9ba31",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
||||
"What is 13 raised to the .3432 power?\u001b[32;1m\u001b[1;3m\n",
|
||||
"```text\n",
|
||||
"13 ** .3432\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"13 ** .3432\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m2.4116004626599237\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: 2.4116004626599237'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMMathChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_math = LLMMathChain.from_llm(llm, verbose=True)\n",
|
||||
"\n",
|
||||
"llm_math.run(\"What is 13 raised to the .3432 power?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e978bb8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLMSummarizationCheckerChain\n",
|
||||
"# Summarization checker chain\n",
|
||||
"This notebook shows some examples of LLMSummarizationCheckerChain in use with different types of texts. It has a few distinct differences from the `LLMCheckerChain`, in that it doesn't have any assumptions to the format of the input text (or summary).\n",
|
||||
"Additionally, as the LLMs like to hallucinate when fact checking or get confused by context, it is sometimes beneficial to run the checker multiple times. It does this by feeding the rewritten \"True\" result back on itself, and checking the \"facts\" for truth. As you can see from the examples below, this can be very effective in arriving at a generally true body of text.\n",
|
||||
"\n",
|
||||
@@ -1121,7 +1121,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
cookbook/llm_symbolic_math.ipynb (new file, 162 lines)
@@ -0,0 +1,162 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM Symbolic Math \n",
|
||||
"This notebook showcases using LLMs and Python to Solve Algebraic Equations. Under the hood is makes use of [SymPy](https://www.sympy.org/en/index.html)."
|
||||
]
|
||||
},
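Because the chain delegates the actual math to SymPy, here is a minimal sketch of the equivalent direct SymPy call for the derivative example below:

```python
from sympy import diff, exp, sin, symbols

x = symbols("x")
# d/dx [sin(x) * exp(x)] -- the same computation the chain performs.
print(diff(sin(x) * exp(x), x))  # exp(x)*sin(x) + exp(x)*cos(x)
```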
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_symbolic_math = LLMSymbolicMathChain.from_llm(llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Integrals and derivates"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: exp(x)*sin(x) + exp(x)*cos(x)'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.run(\"What is the derivative of sin(x)*exp(x) with respect to x?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: exp(x)*sin(x)'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.run(\n",
|
||||
" \"What is the integral of exp(x)*sin(x) + exp(x)*cos(x) with respect to x?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Solve linear and differential equations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: Eq(y(t), C2*exp(-t) + (C1 + t/2)*exp(t))'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.run('Solve the differential equation y\" - y = e^t')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: {0, -sqrt(3)*I/3, sqrt(3)*I/3}'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.run(\"What are the solutions to this equation y^3 + 1/3y?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: (3 - sqrt(7), -sqrt(7) - 2, 1 - sqrt(7)), (sqrt(7) + 3, -2 + sqrt(7), 1 + sqrt(7))'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.run(\"x = y + 5, y = z - 3, z = x * y. Solve for x, y, z\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -56,8 +56,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import OpenAI, LLMChain, PromptTemplate\n",
|
||||
"from langchain.memory import ConversationBufferWindowMemory"
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.memory import ConversationBufferWindowMemory\n",
|
||||
"from langchain.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -79,20 +81,20 @@
|
||||
" Assistant:\"\"\"\n",
|
||||
"\n",
|
||||
" prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"human_input\"], \n",
|
||||
" template=template\n",
|
||||
" input_variables=[\"history\", \"human_input\"], template=template\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0), \n",
|
||||
" prompt=prompt, \n",
|
||||
" verbose=True, \n",
|
||||
" llm=OpenAI(temperature=0),\n",
|
||||
" prompt=prompt,\n",
|
||||
" verbose=True,\n",
|
||||
" memory=ConversationBufferWindowMemory(),\n",
|
||||
" )\n",
|
||||
" return chain\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def initialize_meta_chain():\n",
|
||||
" meta_template=\"\"\"\n",
|
||||
" meta_template = \"\"\"\n",
|
||||
" Assistant has just had the below interactions with a User. Assistant followed their \"Instructions\" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future.\n",
|
||||
"\n",
|
||||
" ####\n",
|
||||
@@ -109,25 +111,26 @@
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" meta_prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"chat_history\"], \n",
|
||||
" template=meta_template\n",
|
||||
" input_variables=[\"chat_history\"], template=meta_template\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" meta_chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0), \n",
|
||||
" prompt=meta_prompt, \n",
|
||||
" verbose=True, \n",
|
||||
" llm=OpenAI(temperature=0),\n",
|
||||
" prompt=meta_prompt,\n",
|
||||
" verbose=True,\n",
|
||||
" )\n",
|
||||
" return meta_chain\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_chat_history(chain_memory):\n",
|
||||
" memory_key = chain_memory.memory_key\n",
|
||||
" chat_history = chain_memory.load_memory_variables(memory_key)[memory_key]\n",
|
||||
" return chat_history\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_new_instructions(meta_output):\n",
|
||||
" delimiter = 'Instructions: '\n",
|
||||
" new_instructions = meta_output[meta_output.find(delimiter)+len(delimiter):]\n",
|
||||
" delimiter = \"Instructions: \"\n",
|
||||
" new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]\n",
|
||||
" return new_instructions"
|
||||
]
|
||||
},
|
||||
@@ -139,33 +142,33 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def main(task, max_iters=3, max_meta_iters=5):\n",
|
||||
" failed_phrase = 'task failed'\n",
|
||||
" success_phrase = 'task succeeded'\n",
|
||||
" failed_phrase = \"task failed\"\n",
|
||||
" success_phrase = \"task succeeded\"\n",
|
||||
" key_phrases = [success_phrase, failed_phrase]\n",
|
||||
" \n",
|
||||
" instructions = 'None'\n",
|
||||
"\n",
|
||||
" instructions = \"None\"\n",
|
||||
" for i in range(max_meta_iters):\n",
|
||||
" print(f'[Episode {i+1}/{max_meta_iters}]')\n",
|
||||
" print(f\"[Episode {i+1}/{max_meta_iters}]\")\n",
|
||||
" chain = initialize_chain(instructions, memory=None)\n",
|
||||
" output = chain.predict(human_input=task)\n",
|
||||
" for j in range(max_iters):\n",
|
||||
" print(f'(Step {j+1}/{max_iters})')\n",
|
||||
" print(f'Assistant: {output}')\n",
|
||||
" print(f'Human: ')\n",
|
||||
" print(f\"(Step {j+1}/{max_iters})\")\n",
|
||||
" print(f\"Assistant: {output}\")\n",
|
||||
" print(\"Human: \")\n",
|
||||
" human_input = input()\n",
|
||||
" if any(phrase in human_input.lower() for phrase in key_phrases):\n",
|
||||
" break\n",
|
||||
" output = chain.predict(human_input=human_input)\n",
|
||||
" if success_phrase in human_input.lower():\n",
|
||||
" print(f'You succeeded! Thanks for playing!')\n",
|
||||
" print(\"You succeeded! Thanks for playing!\")\n",
|
||||
" return\n",
|
||||
" meta_chain = initialize_meta_chain()\n",
|
||||
" meta_output = meta_chain.predict(chat_history=get_chat_history(chain.memory))\n",
|
||||
" print(f'Feedback: {meta_output}')\n",
|
||||
" print(f\"Feedback: {meta_output}\")\n",
|
||||
" instructions = get_new_instructions(meta_output)\n",
|
||||
" print(f'New Instructions: {instructions}')\n",
|
||||
" print('\\n'+'#'*80+'\\n')\n",
|
||||
" print(f'You failed! Thanks for playing!')"
|
||||
" print(f\"New Instructions: {instructions}\")\n",
|
||||
" print(\"\\n\" + \"#\" * 80 + \"\\n\")\n",
|
||||
" print(\"You failed! Thanks for playing!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
cookbook/multi_modal_QA.ipynb (new file, 227 lines; diff suppressed because one or more lines are too long)
cookbook/multi_modal_RAG_chroma.ipynb (new file, 499 lines; diff suppressed because one or more lines are too long)
cookbook/multi_modal_output_agent.ipynb (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cd835d40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Multi-modal outputs: Image & Text"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fa88e03a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook shows how non-text producing tools can be used to create multi-modal agents.\n",
|
||||
"\n",
|
||||
"This example is limited to text and image outputs and uses UUIDs to transfer content across tools and agents. \n",
|
||||
"\n",
|
||||
"This example uses Steamship to generate and store generated images. Generated are auth protected by default. \n",
|
||||
"\n",
|
||||
"You can get your Steamship api key here: https://steamship.com/account/api"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0653da01",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"\n",
|
||||
"from IPython.display import Image\n",
|
||||
"from steamship import Block, Steamship"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f6933033",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.tools import SteamshipImageGenerationTool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "71e51e53",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a9fc769d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dall-E "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cd177dfe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [SteamshipImageGenerationTool(model_name=\"dall-e\")]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c71b1e46",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "603aeb9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output = mrkl.run(\"How would you visualize a parot playing soccer?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "25eb4efe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def show_output(output):\n",
|
||||
" \"\"\"Display the multi-modal output from the agent.\"\"\"\n",
|
||||
" UUID_PATTERN = re.compile(\n",
|
||||
" r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" outputs = UUID_PATTERN.split(output)\n",
|
||||
" outputs = [\n",
|
||||
" re.sub(r\"^\\W+\", \"\", el) for el in outputs\n",
|
||||
" ] # Clean trailing and leading non-word characters\n",
|
||||
"\n",
|
||||
" for output in outputs:\n",
|
||||
" maybe_block_id = UUID_PATTERN.search(output)\n",
|
||||
" if maybe_block_id:\n",
|
||||
" display(Image(Block.get(Steamship(), _id=maybe_block_id.group()).raw()))\n",
|
||||
" else:\n",
|
||||
" print(output, end=\"\\n\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e247b2c4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## StableDiffusion "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "315025e7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [SteamshipImageGenerationTool(model_name=\"stable-diffusion\")]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7930064a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "611a833d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output = mrkl.run(\"How would you visualize a parot playing soccer?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -26,13 +26,12 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List, Dict, Callable\n",
|
||||
"from typing import Callable, List\n",
|
||||
"\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -66,7 +65,7 @@
|
||||
" self.model = model\n",
|
||||
" self.prefix = f\"{self.name}: \"\n",
|
||||
" self.reset()\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\"Here is the conversation so far.\"]\n",
|
||||
"\n",
|
||||
@@ -118,7 +117,7 @@
|
||||
" self.agents = agents\n",
|
||||
" self._step = 0\n",
|
||||
" self.select_next_speaker = selection_function\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" for agent in self.agents:\n",
|
||||
" agent.reset()\n",
|
||||
@@ -167,7 +166,7 @@
|
||||
"character_names = [\"Harry Potter\", \"Ron Weasley\", \"Hermione Granger\", \"Argus Filch\"]\n",
|
||||
"storyteller_name = \"Dungeon Master\"\n",
|
||||
"quest = \"Find all of Lord Voldemort's seven horcruxes.\"\n",
|
||||
"word_limit = 50 # word limit for task brainstorming"
|
||||
"word_limit = 50 # word limit for task brainstorming"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -188,24 +187,30 @@
|
||||
" The story is narrated by the storyteller, {storyteller_name}.\"\"\"\n",
|
||||
"\n",
|
||||
"player_descriptor_system_message = SystemMessage(\n",
|
||||
" content=\"You can add detail to the description of a Dungeons & Dragons player.\")\n",
|
||||
" content=\"You can add detail to the description of a Dungeons & Dragons player.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_character_description(character_name):\n",
|
||||
" character_specifier_prompt = [\n",
|
||||
" player_descriptor_system_message,\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" HumanMessage(\n",
|
||||
" content=f\"\"\"{game_description}\n",
|
||||
" Please reply with a creative description of the character, {character_name}, in {word_limit} words or less. \n",
|
||||
" Speak directly to {character_name}.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
" character_description = ChatOpenAI(temperature=1.0)(character_specifier_prompt).content\n",
|
||||
" character_description = ChatOpenAI(temperature=1.0)(\n",
|
||||
" character_specifier_prompt\n",
|
||||
" ).content\n",
|
||||
" return character_description\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_character_system_message(character_name, character_description):\n",
|
||||
" return SystemMessage(content=(\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" return SystemMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" Your name is {character_name}. \n",
|
||||
" Your character description is as follows: {character_description}.\n",
|
||||
" You will propose actions you plan to take and {storyteller_name} will explain what happens when you take those actions.\n",
|
||||
@@ -218,24 +223,36 @@
|
||||
" Never forget to keep your response to {word_limit} words!\n",
|
||||
" Do not add anything else.\n",
|
||||
" \"\"\"\n",
|
||||
" ))\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"character_descriptions = [generate_character_description(character_name) for character_name in character_names]\n",
|
||||
"character_system_messages = [generate_character_system_message(character_name, character_description) for character_name, character_description in zip(character_names, character_descriptions)]\n",
|
||||
"\n",
|
||||
"character_descriptions = [\n",
|
||||
" generate_character_description(character_name) for character_name in character_names\n",
|
||||
"]\n",
|
||||
"character_system_messages = [\n",
|
||||
" generate_character_system_message(character_name, character_description)\n",
|
||||
" for character_name, character_description in zip(\n",
|
||||
" character_names, character_descriptions\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"storyteller_specifier_prompt = [\n",
|
||||
" player_descriptor_system_message,\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" HumanMessage(\n",
|
||||
" content=f\"\"\"{game_description}\n",
|
||||
" Please reply with a creative description of the storyteller, {storyteller_name}, in {word_limit} words or less. \n",
|
||||
" Speak directly to {storyteller_name}.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"storyteller_description = ChatOpenAI(temperature=1.0)(storyteller_specifier_prompt).content\n",
|
||||
"storyteller_description = ChatOpenAI(temperature=1.0)(\n",
|
||||
" storyteller_specifier_prompt\n",
|
||||
").content\n",
|
||||
"\n",
|
||||
"storyteller_system_message = SystemMessage(content=(\n",
|
||||
"f\"\"\"{game_description}\n",
|
||||
"storyteller_system_message = SystemMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
"You are the storyteller, {storyteller_name}. \n",
|
||||
"Your description is as follows: {storyteller_description}.\n",
|
||||
"The other players will propose actions to take and you will explain what happens when they take those actions.\n",
|
||||
@@ -247,7 +264,8 @@
|
||||
"Never forget to keep your response to {word_limit} words!\n",
|
||||
"Do not add anything else.\n",
|
||||
"\"\"\"\n",
|
||||
"))"
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -273,10 +291,12 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print('Storyteller Description:')\n",
|
||||
"print(\"Storyteller Description:\")\n",
|
||||
"print(storyteller_description)\n",
|
||||
"for character_name, character_description in zip(character_names, character_descriptions):\n",
|
||||
" print(f'{character_name} Description:')\n",
|
||||
"for character_name, character_description in zip(\n",
|
||||
" character_names, character_descriptions\n",
|
||||
"):\n",
|
||||
" print(f\"{character_name} Description:\")\n",
|
||||
" print(character_description)"
|
||||
]
|
||||
},
|
||||
@@ -308,15 +328,15 @@
|
||||
"source": [
|
||||
"quest_specifier_prompt = [\n",
|
||||
" SystemMessage(content=\"You can make a task more specific.\"),\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" HumanMessage(\n",
|
||||
" content=f\"\"\"{game_description}\n",
|
||||
" \n",
|
||||
" You are the storyteller, {storyteller_name}.\n",
|
||||
" Please make the quest more specific. Be creative and imaginative.\n",
|
||||
" Please reply with the specified quest in {word_limit} words or less. \n",
|
||||
" Speak directly to the characters: {*character_names,}.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"specified_quest = ChatOpenAI(temperature=1.0)(quest_specifier_prompt).content\n",
|
||||
"\n",
|
||||
@@ -338,14 +358,21 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"characters = []\n",
|
||||
"for character_name, character_system_message in zip(character_names, character_system_messages):\n",
|
||||
" characters.append(DialogueAgent(\n",
|
||||
" name=character_name,\n",
|
||||
" system_message=character_system_message, \n",
|
||||
" model=ChatOpenAI(temperature=0.2)))\n",
|
||||
"storyteller = DialogueAgent(name=storyteller_name,\n",
|
||||
" system_message=storyteller_system_message, \n",
|
||||
" model=ChatOpenAI(temperature=0.2))"
|
||||
"for character_name, character_system_message in zip(\n",
|
||||
" character_names, character_system_messages\n",
|
||||
"):\n",
|
||||
" characters.append(\n",
|
||||
" DialogueAgent(\n",
|
||||
" name=character_name,\n",
|
||||
" system_message=character_system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
"storyteller = DialogueAgent(\n",
|
||||
" name=storyteller_name,\n",
|
||||
" system_message=storyteller_system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -358,7 +385,7 @@
|
||||
" \"\"\"\n",
|
||||
" If the step is even, then select the storyteller\n",
|
||||
" Otherwise, select the other characters in a round-robin fashion.\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" For example, with three characters with indices: 1 2 3\n",
|
||||
" The storyteller is index 0.\n",
|
||||
" Then the selected index will be as follows:\n",
|
||||
@@ -370,7 +397,7 @@
|
||||
" if step % 2 == 0:\n",
|
||||
" idx = 0\n",
|
||||
" else:\n",
|
||||
" idx = (step//2) % (len(agents)-1) + 1\n",
|
||||
" idx = (step // 2) % (len(agents) - 1) + 1\n",
|
||||
" return idx"
|
||||
]
|
||||
},
|
||||
@@ -457,18 +484,17 @@
|
||||
"n = 0\n",
|
||||
"\n",
|
||||
"simulator = DialogueSimulator(\n",
|
||||
" agents=[storyteller] + characters,\n",
|
||||
" selection_function=select_next_speaker\n",
|
||||
" agents=[storyteller] + characters, selection_function=select_next_speaker\n",
|
||||
")\n",
|
||||
"simulator.reset()\n",
|
||||
"simulator.inject(storyteller_name, specified_quest)\n",
|
||||
"print(f\"({storyteller_name}): {specified_quest}\")\n",
|
||||
"print('\\n')\n",
|
||||
"print(\"\\n\")\n",
|
||||
"\n",
|
||||
"while n < max_iters:\n",
|
||||
" name, message = simulator.step()\n",
|
||||
" print(f\"({name}): {message}\")\n",
|
||||
" print('\\n')\n",
|
||||
" print(\"\\n\")\n",
|
||||
" n += 1"
|
||||
]
|
||||
},
|
||||
@@ -27,26 +27,20 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from collections import OrderedDict\n",
|
||||
"import functools\n",
|
||||
"import random\n",
|
||||
"import re\n",
|
||||
"import tenacity\n",
|
||||
"from typing import List, Dict, Callable\n",
|
||||
"from collections import OrderedDict\n",
|
||||
"from typing import Callable, List\n",
|
||||
"\n",
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate, \n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" PromptTemplate\n",
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"import tenacity\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"from langchain.prompts import (\n",
|
||||
" PromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -76,7 +70,7 @@
|
||||
" self.model = model\n",
|
||||
" self.prefix = f\"{self.name}: \"\n",
|
||||
" self.reset()\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\"Here is the conversation so far.\"]\n",
|
||||
"\n",
|
||||
@@ -109,7 +103,7 @@
|
||||
" self.agents = agents\n",
|
||||
" self._step = 0\n",
|
||||
" self.select_next_speaker = selection_function\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" for agent in self.agents:\n",
|
||||
" agent.reset()\n",
|
||||
@@ -168,7 +162,8 @@
|
||||
"source": [
|
||||
"class IntegerOutputParser(RegexParser):\n",
|
||||
" def get_format_instructions(self) -> str:\n",
|
||||
" return 'Your response should be an integer delimited by angled brackets, like this: <int>.' \n",
|
||||
" return \"Your response should be an integer delimited by angled brackets, like this: <int>.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class DirectorDialogueAgent(DialogueAgent):\n",
|
||||
" def __init__(\n",
|
||||
@@ -181,13 +176,13 @@
|
||||
" ) -> None:\n",
|
||||
" super().__init__(name, system_message, model)\n",
|
||||
" self.speakers = speakers\n",
|
||||
" self.next_speaker = ''\n",
|
||||
" \n",
|
||||
" self.next_speaker = \"\"\n",
|
||||
"\n",
|
||||
" self.stop = False\n",
|
||||
" self.stopping_probability = stopping_probability\n",
|
||||
" self.termination_clause = 'Finish the conversation by stating a concluding message and thanking everyone.'\n",
|
||||
" self.continuation_clause = 'Do not end the conversation. Keep the conversation going by adding your own ideas.'\n",
|
||||
" \n",
|
||||
" self.termination_clause = \"Finish the conversation by stating a concluding message and thanking everyone.\"\n",
|
||||
" self.continuation_clause = \"Do not end the conversation. Keep the conversation going by adding your own ideas.\"\n",
|
||||
"\n",
|
||||
" # 1. have a prompt for generating a response to the previous speaker\n",
|
||||
" self.response_prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"message_history\", \"termination_clause\"],\n",
|
||||
@@ -196,13 +191,13 @@
|
||||
"Follow up with an insightful comment.\n",
|
||||
"{{termination_clause}}\n",
|
||||
"{self.prefix}\n",
|
||||
" \"\"\")\n",
|
||||
" \n",
|
||||
" \"\"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # 2. have a prompt for deciding who to speak next\n",
|
||||
" self.choice_parser = IntegerOutputParser(\n",
|
||||
" regex=r'<(\\d+)>', \n",
|
||||
" output_keys=['choice'], \n",
|
||||
" default_output_key='choice') \n",
|
||||
" regex=r\"<(\\d+)>\", output_keys=[\"choice\"], default_output_key=\"choice\"\n",
|
||||
" )\n",
|
||||
" self.choose_next_speaker_prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"message_history\", \"speaker_names\"],\n",
|
||||
" template=f\"\"\"{{message_history}}\n",
|
||||
@@ -213,8 +208,9 @@
|
||||
"{self.choice_parser.get_format_instructions()}\n",
|
||||
"\n",
|
||||
"Do nothing else.\n",
|
||||
" \"\"\")\n",
|
||||
" \n",
|
||||
" \"\"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # 3. have a prompt for prompting the next speaker to speak\n",
|
||||
" self.prompt_next_speaker_prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"message_history\", \"next_speaker\"],\n",
|
||||
@@ -223,40 +219,48 @@
|
||||
"The next speaker is {{next_speaker}}. \n",
|
||||
"Prompt the next speaker to speak with an insightful question.\n",
|
||||
"{self.prefix}\n",
|
||||
" \"\"\")\n",
|
||||
" \n",
|
||||
" \"\"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" def _generate_response(self):\n",
|
||||
" # if self.stop = True, then we will inject the prompt with a termination clause\n",
|
||||
" sample = random.uniform(0,1)\n",
|
||||
" sample = random.uniform(0, 1)\n",
|
||||
" self.stop = sample < self.stopping_probability\n",
|
||||
" \n",
|
||||
" print(f'\\tStop? {self.stop}\\n')\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" print(f\"\\tStop? {self.stop}\\n\")\n",
|
||||
"\n",
|
||||
" response_prompt = self.response_prompt_template.format(\n",
|
||||
" message_history='\\n'.join(self.message_history),\n",
|
||||
" termination_clause=self.termination_clause if self.stop else ''\n",
|
||||
" message_history=\"\\n\".join(self.message_history),\n",
|
||||
" termination_clause=self.termination_clause if self.stop else \"\",\n",
|
||||
" )\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" self.response = self.model(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=response_prompt),\n",
|
||||
" ]\n",
|
||||
" ).content\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" return self.response\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" @tenacity.retry(stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"),\n",
|
||||
" retry_error_callback=lambda retry_state: 0) # Default value when all retries are exhausted\n",
|
||||
" def _choose_next_speaker(self) -> str: \n",
|
||||
" speaker_names = '\\n'.join([f'{idx}: {name}' for idx, name in enumerate(self.speakers)])\n",
|
||||
"\n",
|
||||
" @tenacity.retry(\n",
|
||||
" stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(\n",
|
||||
" f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"\n",
|
||||
" ),\n",
|
||||
" retry_error_callback=lambda retry_state: 0,\n",
|
||||
" ) # Default value when all retries are exhausted\n",
|
||||
" def _choose_next_speaker(self) -> str:\n",
|
||||
" speaker_names = \"\\n\".join(\n",
|
||||
" [f\"{idx}: {name}\" for idx, name in enumerate(self.speakers)]\n",
|
||||
" )\n",
|
||||
" choice_prompt = self.choose_next_speaker_prompt_template.format(\n",
|
||||
" message_history='\\n'.join(self.message_history + [self.prefix] + [self.response]),\n",
|
||||
" speaker_names=speaker_names\n",
|
||||
" message_history=\"\\n\".join(\n",
|
||||
" self.message_history + [self.prefix] + [self.response]\n",
|
||||
" ),\n",
|
||||
" speaker_names=speaker_names,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" choice_string = self.model(\n",
|
||||
@@ -265,13 +269,13 @@
|
||||
" HumanMessage(content=choice_prompt),\n",
|
||||
" ]\n",
|
||||
" ).content\n",
|
||||
" choice = int(self.choice_parser.parse(choice_string)['choice'])\n",
|
||||
" \n",
|
||||
" choice = int(self.choice_parser.parse(choice_string)[\"choice\"])\n",
|
||||
"\n",
|
||||
" return choice\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def select_next_speaker(self):\n",
|
||||
" return self.chosen_speaker_id\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def send(self) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Applies the chatmodel to the message history\n",
|
||||
@@ -279,19 +283,21 @@
|
||||
" \"\"\"\n",
|
||||
" # 1. generate and save response to the previous speaker\n",
|
||||
" self.response = self._generate_response()\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" if self.stop:\n",
|
||||
" message = self.response \n",
|
||||
" message = self.response\n",
|
||||
" else:\n",
|
||||
" # 2. decide who to speak next\n",
|
||||
" self.chosen_speaker_id = self._choose_next_speaker()\n",
|
||||
" self.next_speaker = self.speakers[self.chosen_speaker_id]\n",
|
||||
" print(f'\\tNext speaker: {self.next_speaker}\\n')\n",
|
||||
" print(f\"\\tNext speaker: {self.next_speaker}\\n\")\n",
|
||||
"\n",
|
||||
" # 3. prompt the next speaker to speak\n",
|
||||
" next_prompt = self.prompt_next_speaker_prompt_template.format(\n",
|
||||
" message_history=\"\\n\".join(self.message_history + [self.prefix] + [self.response]),\n",
|
||||
" next_speaker=self.next_speaker\n",
|
||||
" message_history=\"\\n\".join(\n",
|
||||
" self.message_history + [self.prefix] + [self.response]\n",
|
||||
" ),\n",
|
||||
" next_speaker=self.next_speaker,\n",
|
||||
" )\n",
|
||||
" message = self.model(\n",
|
||||
" [\n",
|
||||
@@ -299,8 +305,8 @@
|
||||
" HumanMessage(content=next_prompt),\n",
|
||||
" ]\n",
|
||||
" ).content\n",
|
||||
" message = ' '.join([self.response, message])\n",
|
||||
" \n",
|
||||
" message = \" \".join([self.response, message])\n",
|
||||
"\n",
|
||||
" return message"
|
||||
]
|
||||
},
|
||||
@@ -319,12 +325,14 @@
|
||||
"source": [
|
||||
"topic = \"The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze\"\n",
|
||||
"director_name = \"Jon Stewart\"\n",
|
||||
"agent_summaries = OrderedDict({\n",
|
||||
" \"Jon Stewart\": (\"Host of the Daily Show\", \"New York\"),\n",
|
||||
" \"Samantha Bee\": (\"Hollywood Correspondent\", \"Los Angeles\"), \n",
|
||||
" \"Aasif Mandvi\": (\"CIA Correspondent\", \"Washington D.C.\"),\n",
|
||||
" \"Ronny Chieng\": (\"Average American Correspondent\", \"Cleveland, Ohio\"),\n",
|
||||
"})\n",
|
||||
"agent_summaries = OrderedDict(\n",
|
||||
" {\n",
|
||||
" \"Jon Stewart\": (\"Host of the Daily Show\", \"New York\"),\n",
|
||||
" \"Samantha Bee\": (\"Hollywood Correspondent\", \"Los Angeles\"),\n",
|
||||
" \"Aasif Mandvi\": (\"CIA Correspondent\", \"Washington D.C.\"),\n",
|
||||
" \"Ronny Chieng\": (\"Average American Correspondent\", \"Cleveland, Ohio\"),\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"word_limit = 50"
|
||||
]
|
||||
},
|
||||
@@ -341,28 +349,37 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_summary_string = '\\n- '.join([''] + [f'{name}: {role}, located in {location}' for name, (role, location) in agent_summaries.items()])\n",
|
||||
"agent_summary_string = \"\\n- \".join(\n",
|
||||
" [\"\"]\n",
|
||||
" + [\n",
|
||||
" f\"{name}: {role}, located in {location}\"\n",
|
||||
" for name, (role, location) in agent_summaries.items()\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"conversation_description = f\"\"\"This is a Daily Show episode discussing the following topic: {topic}.\n",
|
||||
"\n",
|
||||
"The episode features {agent_summary_string}.\"\"\"\n",
|
||||
"\n",
|
||||
"agent_descriptor_system_message = SystemMessage(\n",
|
||||
" content=\"You can add detail to the description of each person.\")\n",
|
||||
" content=\"You can add detail to the description of each person.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_agent_description(agent_name, agent_role, agent_location):\n",
|
||||
" agent_specifier_prompt = [\n",
|
||||
" agent_descriptor_system_message,\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{conversation_description}\n",
|
||||
" HumanMessage(\n",
|
||||
" content=f\"\"\"{conversation_description}\n",
|
||||
" Please reply with a creative description of {agent_name}, who is a {agent_role} in {agent_location}, that emphasizes their particular role and location.\n",
|
||||
" Speak directly to {agent_name} in {word_limit} words or less.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
" agent_description = ChatOpenAI(temperature=1.0)(agent_specifier_prompt).content\n",
|
||||
" return agent_description\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_agent_header(agent_name, agent_role, agent_location, agent_description):\n",
|
||||
" return f\"\"\"{conversation_description}\n",
|
||||
"\n",
|
||||
@@ -375,9 +392,11 @@
|
||||
"Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_agent_system_message(agent_name, agent_header):\n",
|
||||
" return SystemMessage(content=(\n",
|
||||
" f\"\"\"{agent_header}\n",
|
||||
" return SystemMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"\"\"{agent_header}\n",
|
||||
"You will speak in the style of {agent_name}, and exaggerate your personality.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
"Speak in the first person from the perspective of {agent_name}\n",
|
||||
@@ -389,12 +408,24 @@
|
||||
"Never forget to keep your response to {word_limit} words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \"\"\"\n",
|
||||
" ))\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"agent_descriptions = [generate_agent_description(name, role, location) for name, (role, location) in agent_summaries.items()]\n",
|
||||
"agent_headers = [generate_agent_header(name, role, location, description) for (name, (role, location)), description in zip(agent_summaries.items(), agent_descriptions)]\n",
|
||||
"agent_system_messages = [generate_agent_system_message(name, header) for name, header in zip(agent_summaries, agent_headers)]\n",
|
||||
" "
|
||||
"\n",
|
||||
"agent_descriptions = [\n",
|
||||
" generate_agent_description(name, role, location)\n",
|
||||
" for name, (role, location) in agent_summaries.items()\n",
|
||||
"]\n",
|
||||
"agent_headers = [\n",
|
||||
" generate_agent_header(name, role, location, description)\n",
|
||||
" for (name, (role, location)), description in zip(\n",
|
||||
" agent_summaries.items(), agent_descriptions\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"agent_system_messages = [\n",
|
||||
" generate_agent_system_message(name, header)\n",
|
||||
" for name, header in zip(agent_summaries, agent_headers)\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -618,11 +649,13 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for name, description, header, system_message in zip(agent_summaries, agent_descriptions, agent_headers, agent_system_messages):\n",
|
||||
" print(f'\\n\\n{name} Description:')\n",
|
||||
" print(f'\\n{description}')\n",
|
||||
" print(f'\\nHeader:\\n{header}')\n",
|
||||
" print(f'\\nSystem Message:\\n{system_message.content}')\n"
|
||||
"for name, description, header, system_message in zip(\n",
|
||||
" agent_summaries, agent_descriptions, agent_headers, agent_system_messages\n",
|
||||
"):\n",
|
||||
" print(f\"\\n\\n{name} Description:\")\n",
|
||||
" print(f\"\\n{description}\")\n",
|
||||
" print(f\"\\nHeader:\\n{header}\")\n",
|
||||
" print(f\"\\nSystem Message:\\n{system_message.content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -653,15 +686,15 @@
|
||||
"source": [
|
||||
"topic_specifier_prompt = [\n",
|
||||
" SystemMessage(content=\"You can make a task more specific.\"),\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{conversation_description}\n",
|
||||
" HumanMessage(\n",
|
||||
" content=f\"\"\"{conversation_description}\n",
|
||||
" \n",
|
||||
" Please elaborate on the topic. \n",
|
||||
" Frame the topic as a single question to be answered.\n",
|
||||
" Be creative and imaginative.\n",
|
||||
" Please reply with the specified topic in {word_limit} words or less. \n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content\n",
|
||||
"\n",
|
||||
@@ -685,11 +718,13 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def select_next_speaker(step: int, agents: List[DialogueAgent], director: DirectorDialogueAgent) -> int:\n",
|
||||
"def select_next_speaker(\n",
|
||||
" step: int, agents: List[DialogueAgent], director: DirectorDialogueAgent\n",
|
||||
") -> int:\n",
|
||||
" \"\"\"\n",
|
||||
" If the step is even, then select the director\n",
|
||||
" Otherwise, the director selects the next speaker.\n",
|
||||
" \"\"\" \n",
|
||||
" \"\"\"\n",
|
||||
" # the director speaks on odd steps\n",
|
||||
" if step % 2 == 1:\n",
|
||||
" idx = 0\n",
|
||||
@@ -713,20 +748,24 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"director = DirectorDialogueAgent(\n",
|
||||
" name=director_name, \n",
|
||||
" name=director_name,\n",
|
||||
" system_message=agent_system_messages[0],\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" speakers=[name for name in agent_summaries if name != director_name],\n",
|
||||
" stopping_probability=0.2\n",
|
||||
" stopping_probability=0.2,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"agents = [director]\n",
|
||||
"for name, system_message in zip(list(agent_summaries.keys())[1:], agent_system_messages[1:]): \n",
|
||||
" agents.append(DialogueAgent(\n",
|
||||
" name=name,\n",
|
||||
" system_message=system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" ))"
|
||||
"for name, system_message in zip(\n",
|
||||
" list(agent_summaries.keys())[1:], agent_system_messages[1:]\n",
|
||||
"):\n",
|
||||
" agents.append(\n",
|
||||
" DialogueAgent(\n",
|
||||
" name=name,\n",
|
||||
" system_message=system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -802,19 +841,19 @@
|
||||
"source": [
|
||||
"simulator = DialogueSimulator(\n",
|
||||
" agents=agents,\n",
|
||||
" selection_function=functools.partial(select_next_speaker, director=director)\n",
|
||||
" selection_function=functools.partial(select_next_speaker, director=director),\n",
|
||||
")\n",
|
||||
"simulator.reset()\n",
|
||||
"simulator.inject('Audience member', specified_topic)\n",
|
||||
"simulator.inject(\"Audience member\", specified_topic)\n",
|
||||
"print(f\"(Audience member): {specified_topic}\")\n",
|
||||
"print('\\n')\n",
|
||||
"print(\"\\n\")\n",
|
||||
"\n",
|
||||
"while True:\n",
|
||||
" name, message = simulator.step()\n",
|
||||
" print(f\"({name}): {message}\")\n",
|
||||
" print('\\n')\n",
|
||||
" print(\"\\n\")\n",
|
||||
" if director.stop:\n",
|
||||
" break\n"
|
||||
" break"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -24,17 +24,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate\n",
|
||||
"import re\n",
|
||||
"from typing import Callable, List\n",
|
||||
"\n",
|
||||
"import tenacity\n",
|
||||
"from typing import List, Dict, Callable\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -64,7 +62,7 @@
|
||||
" self.model = model\n",
|
||||
" self.prefix = f\"{self.name}: \"\n",
|
||||
" self.reset()\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\"Here is the conversation so far.\"]\n",
|
||||
"\n",
|
||||
@@ -97,7 +95,7 @@
|
||||
" self.agents = agents\n",
|
||||
" self._step = 0\n",
|
||||
" self.select_next_speaker = selection_function\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" for agent in self.agents:\n",
|
||||
" agent.reset()\n",
|
||||
@@ -154,20 +152,20 @@
|
||||
" ) -> None:\n",
|
||||
" super().__init__(name, system_message, model)\n",
|
||||
" self.bidding_template = bidding_template\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def bid(self) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Asks the chat model to output a bid to speak\n",
|
||||
" \"\"\"\n",
|
||||
" prompt = PromptTemplate(\n",
|
||||
" input_variables=['message_history', 'recent_message'],\n",
|
||||
" template = self.bidding_template\n",
|
||||
" input_variables=[\"message_history\", \"recent_message\"],\n",
|
||||
" template=self.bidding_template,\n",
|
||||
" ).format(\n",
|
||||
" message_history='\\n'.join(self.message_history),\n",
|
||||
" recent_message=self.message_history[-1])\n",
|
||||
" message_history=\"\\n\".join(self.message_history),\n",
|
||||
" recent_message=self.message_history[-1],\n",
|
||||
" )\n",
|
||||
" bid_string = self.model([SystemMessage(content=prompt)]).content\n",
|
||||
" return bid_string\n",
|
||||
" "
|
||||
" return bid_string"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -205,21 +203,26 @@
|
||||
"The presidential candidates are: {', '.join(character_names)}.\"\"\"\n",
|
||||
"\n",
|
||||
"player_descriptor_system_message = SystemMessage(\n",
|
||||
" content=\"You can add detail to the description of each presidential candidate.\")\n",
|
||||
" content=\"You can add detail to the description of each presidential candidate.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_character_description(character_name):\n",
|
||||
" character_specifier_prompt = [\n",
|
||||
" player_descriptor_system_message,\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" HumanMessage(\n",
|
||||
" content=f\"\"\"{game_description}\n",
|
||||
" Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities. \n",
|
||||
" Speak directly to {character_name}.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
" character_description = ChatOpenAI(temperature=1.0)(character_specifier_prompt).content\n",
|
||||
" character_description = ChatOpenAI(temperature=1.0)(\n",
|
||||
" character_specifier_prompt\n",
|
||||
" ).content\n",
|
||||
" return character_description\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_character_header(character_name, character_description):\n",
|
||||
" return f\"\"\"{game_description}\n",
|
||||
"Your name is {character_name}.\n",
|
||||
@@ -229,9 +232,11 @@
|
||||
"Your goal is to be as creative as possible and make the voters think you are the best candidate.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def generate_character_system_message(character_name, character_header):\n",
|
||||
" return SystemMessage(content=(\n",
|
||||
" f\"\"\"{character_header}\n",
|
||||
" return SystemMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"\"\"{character_header}\n",
|
||||
"You will speak in the style of {character_name}, and exaggerate their personality.\n",
|
||||
"You will come up with creative ideas related to {topic}.\n",
|
||||
"Do not say the same things over and over again.\n",
|
||||
@@ -244,12 +249,23 @@
|
||||
"Never forget to keep your response to {word_limit} words!\n",
|
||||
"Do not add anything else.\n",
|
||||
" \"\"\"\n",
|
||||
" ))\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"character_descriptions = [generate_character_description(character_name) for character_name in character_names]\n",
|
||||
"character_headers = [generate_character_header(character_name, character_description) for character_name, character_description in zip(character_names, character_descriptions)]\n",
|
||||
"character_system_messages = [generate_character_system_message(character_name, character_headers) for character_name, character_headers in zip(character_names, character_headers)]\n",
|
||||
" "
|
||||
"\n",
|
||||
"character_descriptions = [\n",
|
||||
" generate_character_description(character_name) for character_name in character_names\n",
|
||||
"]\n",
|
||||
"character_headers = [\n",
|
||||
" generate_character_header(character_name, character_description)\n",
|
||||
" for character_name, character_description in zip(\n",
|
||||
" character_names, character_descriptions\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"character_system_messages = [\n",
|
||||
" generate_character_system_message(character_name, character_headers)\n",
|
||||
" for character_name, character_headers in zip(character_names, character_headers)\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -370,11 +386,21 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for character_name, character_description, character_header, character_system_message in zip(character_names, character_descriptions, character_headers, character_system_messages):\n",
|
||||
" print(f'\\n\\n{character_name} Description:')\n",
|
||||
" print(f'\\n{character_description}')\n",
|
||||
" print(f'\\n{character_header}')\n",
|
||||
" print(f'\\n{character_system_message.content}')\n"
|
||||
"for (\n",
|
||||
" character_name,\n",
|
||||
" character_description,\n",
|
||||
" character_header,\n",
|
||||
" character_system_message,\n",
|
||||
") in zip(\n",
|
||||
" character_names,\n",
|
||||
" character_descriptions,\n",
|
||||
" character_headers,\n",
|
||||
" character_system_messages,\n",
|
||||
"):\n",
|
||||
" print(f\"\\n\\n{character_name} Description:\")\n",
|
||||
" print(f\"\\n{character_description}\")\n",
|
||||
" print(f\"\\n{character_header}\")\n",
|
||||
" print(f\"\\n{character_system_message.content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -386,7 +412,7 @@
|
||||
"1. define a format they will produce their outputs in\n",
|
||||
"2. parse their outputs\n",
|
||||
"\n",
|
||||
"We can subclass the [RegexParser](https://github.com/hwchase17/langchain/blob/master/langchain/output_parsers/regex.py) to implement our own custom output parser for bids."
|
||||
"We can subclass the [RegexParser](https://github.com/langchain-ai/langchain/blob/master/langchain/output_parsers/regex.py) to implement our own custom output parser for bids."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -397,12 +423,12 @@
|
||||
"source": [
|
||||
"class BidOutputParser(RegexParser):\n",
|
||||
" def get_format_instructions(self) -> str:\n",
|
||||
" return 'Your response should be an integer delimited by angled brackets, like this: <int>.' \n",
|
||||
" \n",
|
||||
" return \"Your response should be an integer delimited by angled brackets, like this: <int>.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"bid_parser = BidOutputParser(\n",
|
||||
" regex=r'<(\\d+)>', \n",
|
||||
" output_keys=['bid'],\n",
|
||||
" default_output_key='bid')"
|
||||
" regex=r\"<(\\d+)>\", output_keys=[\"bid\"], default_output_key=\"bid\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -420,8 +446,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def generate_character_bidding_template(character_header):\n",
|
||||
" bidding_template = (\n",
|
||||
" f\"\"\"{character_header}\n",
|
||||
" bidding_template = f\"\"\"{character_header}\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"{{message_history}}\n",
|
||||
@@ -435,11 +460,14 @@
|
||||
"\n",
|
||||
"{bid_parser.get_format_instructions()}\n",
|
||||
"Do nothing else.\n",
|
||||
" \"\"\")\n",
|
||||
" \"\"\"\n",
|
||||
" return bidding_template\n",
|
||||
"\n",
|
||||
"character_bidding_templates = [generate_character_bidding_template(character_header) for character_header in character_headers]\n",
|
||||
" \n"
|
||||
"\n",
|
||||
"character_bidding_templates = [\n",
|
||||
" generate_character_bidding_template(character_header)\n",
|
||||
" for character_header in character_headers\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -524,8 +552,10 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for character_name, bidding_template in zip(character_names, character_bidding_templates):\n",
|
||||
" print(f'{character_name} Bidding Template:')\n",
|
||||
"for character_name, bidding_template in zip(\n",
|
||||
" character_names, character_bidding_templates\n",
|
||||
"):\n",
|
||||
" print(f\"{character_name} Bidding Template:\")\n",
|
||||
" print(bidding_template)"
|
||||
]
|
||||
},
|
||||
@@ -557,8 +587,8 @@
|
||||
"source": [
|
||||
"topic_specifier_prompt = [\n",
|
||||
" SystemMessage(content=\"You can make a task more specific.\"),\n",
|
||||
" HumanMessage(content=\n",
|
||||
" f\"\"\"{game_description}\n",
|
||||
" HumanMessage(\n",
|
||||
" content=f\"\"\"{game_description}\n",
|
||||
" \n",
|
||||
" You are the debate moderator.\n",
|
||||
" Please make the debate topic more specific. \n",
|
||||
@@ -567,7 +597,7 @@
|
||||
" Please reply with the specified topic in {word_limit} words or less. \n",
|
||||
" Speak directly to the presidential candidates: {*character_names,}.\n",
|
||||
" Do not add anything else.\"\"\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content\n",
|
||||
"\n",
|
||||
@@ -591,17 +621,21 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"@tenacity.retry(stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"),\n",
|
||||
" retry_error_callback=lambda retry_state: 0) # Default value when all retries are exhausted\n",
|
||||
"@tenacity.retry(\n",
|
||||
" stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(\n",
|
||||
" f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"\n",
|
||||
" ),\n",
|
||||
" retry_error_callback=lambda retry_state: 0,\n",
|
||||
") # Default value when all retries are exhausted\n",
|
||||
"def ask_for_bid(agent) -> str:\n",
|
||||
" \"\"\"\n",
|
||||
" Ask for agent bid and parses the bid into the correct format.\n",
|
||||
" \"\"\"\n",
|
||||
" bid_string = agent.bid()\n",
|
||||
" bid = int(bid_parser.parse(bid_string)['bid'])\n",
|
||||
" bid = int(bid_parser.parse(bid_string)[\"bid\"])\n",
|
||||
" return bid"
|
||||
]
|
||||
},
|
||||
@@ -613,24 +647,25 @@
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:\n",
|
||||
" bids = []\n",
|
||||
" for agent in agents:\n",
|
||||
" bid = ask_for_bid(agent)\n",
|
||||
" bids.append(bid)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" # randomly select among multiple agents with the same bid\n",
|
||||
" max_value = np.max(bids)\n",
|
||||
" max_indices = np.where(bids == max_value)[0]\n",
|
||||
" idx = np.random.choice(max_indices)\n",
|
||||
" \n",
|
||||
" print('Bids:')\n",
|
||||
"\n",
|
||||
" print(\"Bids:\")\n",
|
||||
" for i, (bid, agent) in enumerate(zip(bids, agents)):\n",
|
||||
" print(f'\\t{agent.name} bid: {bid}')\n",
|
||||
" print(f\"\\t{agent.name} bid: {bid}\")\n",
|
||||
" if i == idx:\n",
|
||||
" selected_name = agent.name\n",
|
||||
" print(f'Selected: {selected_name}')\n",
|
||||
" print('\\n')\n",
|
||||
" print(f\"Selected: {selected_name}\")\n",
|
||||
" print(\"\\n\")\n",
|
||||
" return idx"
|
||||
]
|
||||
},
|
||||
@@ -648,13 +683,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"characters = []\n",
|
||||
"for character_name, character_system_message, bidding_template in zip(character_names, character_system_messages, character_bidding_templates):\n",
|
||||
" characters.append(BiddingDialogueAgent(\n",
|
||||
" name=character_name,\n",
|
||||
" system_message=character_system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" bidding_template=bidding_template,\n",
|
||||
" ))"
|
||||
"for character_name, character_system_message, bidding_template in zip(\n",
|
||||
" character_names, character_system_messages, character_bidding_templates\n",
|
||||
"):\n",
|
||||
" characters.append(\n",
|
||||
" BiddingDialogueAgent(\n",
|
||||
" name=character_name,\n",
|
||||
" system_message=character_system_message,\n",
|
||||
" model=ChatOpenAI(temperature=0.2),\n",
|
||||
" bidding_template=bidding_template,\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -776,19 +815,16 @@
|
||||
"max_iters = 10\n",
|
||||
"n = 0\n",
|
||||
"\n",
|
||||
"simulator = DialogueSimulator(\n",
|
||||
" agents=characters,\n",
|
||||
" selection_function=select_next_speaker\n",
|
||||
")\n",
|
||||
"simulator = DialogueSimulator(agents=characters, selection_function=select_next_speaker)\n",
|
||||
"simulator.reset()\n",
|
||||
"simulator.inject('Debate Moderator', specified_topic)\n",
|
||||
"simulator.inject(\"Debate Moderator\", specified_topic)\n",
|
||||
"print(f\"(Debate Moderator): {specified_topic}\")\n",
|
||||
"print('\\n')\n",
|
||||
"print(\"\\n\")\n",
|
||||
"\n",
|
||||
"while n < max_iters:\n",
|
||||
" name, message = simulator.step()\n",
|
||||
" print(f\"({name}): {message}\")\n",
|
||||
" print('\\n')\n",
|
||||
" print(\"\\n\")\n",
|
||||
" n += 1"
|
||||
]
|
||||
},
|
||||
202
cookbook/myscale_vector_sql.ipynb
Normal file
@@ -0,0 +1,202 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "245065c6",
"metadata": {},
"source": [
"# Vector SQL Retriever with MyScale\n",
"\n",
">[MyScale](https://docs.myscale.com/en/) is an integrated vector database. You can access your database in SQL and also from here, LangChain. MyScale can make a use of [various data types and functions for filters](https://blog.myscale.com/2023/06/06/why-integrated-database-solution-can-boost-your-llm-apps/#filter-on-anything-without-constraints). It will boost up your LLM app no matter if you are scaling up your data or expand your system to broader application."
]
},
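{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough sketch of the idea (table and column names here are only illustrative), the chain translates a natural-language question into SQL in which ordinary clauses are mixed with a vector-distance ranking, along the lines of:\n",
"\n",
"```sql\n",
"SELECT abstract, title\n",
"FROM default.arxiv\n",
"ORDER BY DISTANCE(vector, NeuralArray('PageRank'))\n",
"LIMIT 10\n",
"```\n",
"\n",
"where the output parser substitutes an actual embedding for the `NeuralArray(...)` placeholder before the query runs."
]
},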
{
"cell_type": "code",
"execution_count": null,
"id": "0246c5bf",
"metadata": {},
"outputs": [],
"source": [
"!pip3 install clickhouse-sqlalchemy InstructorEmbedding sentence_transformers openai langchain-experimental"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7585d2c3",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"from os import environ\n",
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.llms import OpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.utilities import SQLDatabase\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"from sqlalchemy import MetaData, create_engine\n",
"\n",
"MYSCALE_HOST = \"msc-4a9e710a.us-east-1.aws.staging.myscale.cloud\"\n",
"MYSCALE_PORT = 443\n",
"MYSCALE_USER = \"chatdata\"\n",
"MYSCALE_PASSWORD = \"myscale_rocks\"\n",
"OPENAI_API_KEY = getpass.getpass(\"OpenAI API Key:\")\n",
"\n",
"engine = create_engine(\n",
"    f\"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/default?protocol=https\"\n",
")\n",
"metadata = MetaData(bind=engine)\n",
"environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e08d9ddc",
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings import HuggingFaceInstructEmbeddings\n",
"from langchain_experimental.sql.vector_sql import VectorSQLOutputParser\n",
"\n",
"output_parser = VectorSQLOutputParser.from_embeddings(\n",
"    model=HuggingFaceInstructEmbeddings(\n",
"        model_name=\"hkunlp/instructor-xl\", model_kwargs={\"device\": \"cpu\"}\n",
"    )\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "84b705b2",
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import StdOutCallbackHandler\n",
"from langchain.llms import OpenAI\n",
"from langchain.utilities.sql_database import SQLDatabase\n",
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"\n",
"chain = VectorSQLDatabaseChain(\n",
"    llm_chain=LLMChain(\n",
"        llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),\n",
"        prompt=MYSCALE_PROMPT,\n",
"    ),\n",
"    top_k=10,\n",
"    return_direct=True,\n",
"    sql_cmd_parser=output_parser,\n",
"    database=SQLDatabase(engine, None, metadata),\n",
")\n",
"\n",
"import pandas as pd\n",
"\n",
"pd.DataFrame(\n",
"    chain.run(\n",
"        \"Please give me 10 papers to ask what is PageRank?\",\n",
"        callbacks=[StdOutCallbackHandler()],\n",
"    )\n",
")"
]
},
{
"cell_type": "markdown",
"id": "6c09cda0",
"metadata": {},
"source": [
"## SQL Database as Retriever"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "734d7ff5",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain_experimental.retrievers.vector_sql_database import (\n",
"    VectorSQLDatabaseChainRetriever,\n",
")\n",
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
"from langchain_experimental.sql.vector_sql import (\n",
"    VectorSQLDatabaseChain,\n",
"    VectorSQLRetrieveAllOutputParser,\n",
")\n",
"\n",
"output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n",
"    output_parser.model\n",
")\n",
"\n",
"chain = VectorSQLDatabaseChain.from_llm(\n",
"    llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),\n",
"    prompt=MYSCALE_PROMPT,\n",
"    top_k=10,\n",
"    return_direct=True,\n",
"    db=SQLDatabase(engine, None, metadata),\n",
"    sql_cmd_parser=output_parser_retrieve_all,\n",
"    native_format=True,\n",
")\n",
"\n",
"# You need all those keys to get docs\n",
"retriever = VectorSQLDatabaseChainRetriever(\n",
"    sql_db_chain=chain, page_content_key=\"abstract\"\n",
")\n",
"\n",
"document_with_metadata_prompt = PromptTemplate(\n",
"    input_variables=[\"page_content\", \"id\", \"title\", \"authors\", \"pubdate\", \"categories\"],\n",
"    template=\"Content:\\n\\tTitle: {title}\\n\\tAbstract: {page_content}\\n\\tAuthors: {authors}\\n\\tDate of Publication: {pubdate}\\n\\tCategories: {categories}\\nSOURCE: {id}\",\n",
")\n",
"\n",
"chain = RetrievalQAWithSourcesChain.from_chain_type(\n",
"    ChatOpenAI(\n",
"        model_name=\"gpt-3.5-turbo-16k\", openai_api_key=OPENAI_API_KEY, temperature=0.6\n",
"    ),\n",
"    retriever=retriever,\n",
"    chain_type=\"stuff\",\n",
"    chain_type_kwargs={\n",
"        \"document_prompt\": document_with_metadata_prompt,\n",
"    },\n",
"    return_source_documents=True,\n",
")\n",
"ans = chain(\n",
"    \"Please give me 10 papers to ask what is PageRank?\",\n",
"    callbacks=[StdOutCallbackHandler()],\n",
")\n",
"print(ans[\"answer\"])"
]
},
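{
"cell_type": "markdown",
"metadata": {},
"source": [
"The retriever can also be queried on its own, outside the QA chain. A minimal sketch, assuming the `retriever` defined above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Each returned Document carries the \"abstract\" column as page_content\n",
"# and the remaining columns as metadata.\n",
"docs = retriever.get_relevant_documents(\"What is PageRank?\")\n",
"print(docs[0].page_content)"
]
},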
{
"cell_type": "code",
"execution_count": null,
"id": "4948ff25",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
449
cookbook/openai_functions_retrieval_qa.ipynb
Normal file
@@ -0,0 +1,449 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "71a43144",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Structure answers with OpenAI functions\n",
|
||||
"\n",
|
||||
"OpenAI functions allows for structuring of response output. This is often useful in question answering when you want to not only get the final answer but also supporting evidence, citations, etc.\n",
|
||||
"\n",
|
||||
"In this notebook we show how to use an LLM chain which uses OpenAI functions as part of an overall retrieval pipeline."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "f059012e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import Chroma"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "f10b831c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader(\"../../state_of_the_union.txt\", encoding=\"utf-8\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"for i, text in enumerate(texts):\n",
|
||||
" text.metadata[\"source\"] = f\"{i}-pl\"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docsearch = Chroma.from_documents(texts, embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "70f3a38c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import create_qa_with_sources_chain\n",
|
||||
"from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "7b3e1731",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "70a9ccff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"qa_chain = create_qa_with_sources_chain(llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "efcdb6fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_prompt = PromptTemplate(\n",
|
||||
" template=\"Content: {page_content}\\nSource: {source}\",\n",
|
||||
" input_variables=[\"page_content\", \"source\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "64a08263",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"final_qa_chain = StuffDocumentsChain(\n",
|
||||
" llm_chain=qa_chain,\n",
|
||||
" document_variable_name=\"context\",\n",
|
||||
" document_prompt=doc_prompt,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "cb876c97",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retrieval_qa = RetrievalQA(\n",
|
||||
" retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"id": "a75bad9b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about russia\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"id": "9a60f109",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'{\\n \"answer\": \"The President expressed strong condemnation of Russia\\'s actions in Ukraine and announced measures to isolate Russia and provide support to Ukraine. He stated that Russia\\'s invasion of Ukraine will have long-term consequences for Russia and emphasized the commitment to defend NATO countries. The President also mentioned taking robust action through sanctions and releasing oil reserves to mitigate gas prices. Overall, the President conveyed a message of solidarity with Ukraine and determination to protect American interests.\",\\n \"sources\": [\"0-pl\", \"4-pl\", \"5-pl\", \"6-pl\"]\\n}'"
|
||||
]
|
||||
},
|
||||
"execution_count": 34,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retrieval_qa.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a60f93a4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using Pydantic\n",
|
||||
"\n",
|
||||
"If we want to, we can set the chain to return in Pydantic. Note that if downstream chains consume the output of this chain - including memory - they will generally expect it to be in string format, so you should only use this chain when it is the final chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "3559727f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"qa_chain_pydantic = create_qa_with_sources_chain(llm, output_parser=\"pydantic\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"id": "5a7997d1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"final_qa_chain_pydantic = StuffDocumentsChain(\n",
|
||||
" llm_chain=qa_chain_pydantic,\n",
|
||||
" document_variable_name=\"context\",\n",
|
||||
" document_prompt=doc_prompt,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "79368e40",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retrieval_qa_pydantic = RetrievalQA(\n",
|
||||
" retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "6b8641de",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AnswerWithSources(answer=\"The President expressed strong condemnation of Russia's actions in Ukraine and announced measures to isolate Russia and provide support to Ukraine. He stated that Russia's invasion of Ukraine will have long-term consequences for Russia and emphasized the commitment to defend NATO countries. The President also mentioned taking robust action through sanctions and releasing oil reserves to mitigate gas prices. Overall, the President conveyed a message of solidarity with Ukraine and determination to protect American interests.\", sources=['0-pl', '4-pl', '5-pl', '6-pl'])"
|
||||
]
|
||||
},
|
||||
"execution_count": 38,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retrieval_qa_pydantic.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e4c15395",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using in ConversationalRetrievalChain\n",
|
||||
"\n",
|
||||
"We can also show what it's like to use this in the ConversationalRetrievalChain. Note that because this chain involves memory, we will NOT use the Pydantic return type."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "18e5f090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationalRetrievalChain, LLMChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"\n",
|
||||
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
|
||||
"_template = \"\"\"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\\\n",
|
||||
"Make sure to avoid using any unclear pronouns.\n",
|
||||
"\n",
|
||||
"Chat History:\n",
|
||||
"{chat_history}\n",
|
||||
"Follow Up Input: {question}\n",
|
||||
"Standalone question:\"\"\"\n",
|
||||
"CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)\n",
|
||||
"condense_question_chain = LLMChain(\n",
|
||||
" llm=llm,\n",
|
||||
" prompt=CONDENSE_QUESTION_PROMPT,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "975c3c2b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"qa = ConversationalRetrievalChain(\n",
|
||||
" question_generator=condense_question_chain,\n",
|
||||
" retriever=docsearch.as_retriever(),\n",
|
||||
" memory=memory,\n",
|
||||
" combine_docs_chain=final_qa_chain,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "784aee3a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"result = qa({\"question\": query})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"id": "dfd0ccc1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'What did the president say about Ketanji Brown Jackson',\n",
|
||||
" 'chat_history': [HumanMessage(content='What did the president say about Ketanji Brown Jackson', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='{\\n \"answer\": \"The President nominated Ketanji Brown Jackson as a Circuit Court of Appeals Judge and praised her as one of the nation\\'s top legal minds who will continue Justice Breyer\\'s legacy of excellence.\",\\n \"sources\": [\"31-pl\"]\\n}', additional_kwargs={}, example=False)],\n",
|
||||
" 'answer': '{\\n \"answer\": \"The President nominated Ketanji Brown Jackson as a Circuit Court of Appeals Judge and praised her as one of the nation\\'s top legal minds who will continue Justice Breyer\\'s legacy of excellence.\",\\n \"sources\": [\"31-pl\"]\\n}'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "c93f805b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"what did he say about her predecessor?\"\n",
|
||||
"result = qa({\"question\": query})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"id": "5d8612c0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'what did he say about her predecessor?',\n",
|
||||
" 'chat_history': [HumanMessage(content='What did the president say about Ketanji Brown Jackson', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='{\\n \"answer\": \"The President nominated Ketanji Brown Jackson as a Circuit Court of Appeals Judge and praised her as one of the nation\\'s top legal minds who will continue Justice Breyer\\'s legacy of excellence.\",\\n \"sources\": [\"31-pl\"]\\n}', additional_kwargs={}, example=False),\n",
|
||||
" HumanMessage(content='what did he say about her predecessor?', additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content='{\\n \"answer\": \"The President honored Justice Stephen Breyer for his service as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.\",\\n \"sources\": [\"31-pl\"]\\n}', additional_kwargs={}, example=False)],\n",
|
||||
" 'answer': '{\\n \"answer\": \"The President honored Justice Stephen Breyer for his service as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.\",\\n \"sources\": [\"31-pl\"]\\n}'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 44,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac9e4626",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using your own output schema\n",
|
||||
"\n",
|
||||
"We can change the outputs of our chain by passing in our own schema. The values and descriptions of this schema will inform the function we pass to the OpenAI API, meaning it won't just affect how we parse outputs but will also change the OpenAI output itself. For example we can add a `countries_referenced` parameter to our schema and describe what we want this parameter to mean, and that'll cause the OpenAI output to include a description of a speaker in the response.\n",
|
||||
"\n",
|
||||
"In addition to the previous example, we can also add a custom prompt to the chain. This will allow you to add additional context to the response, which can be useful for question answering."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"id": "f34a48f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain.chains.openai_functions import create_qa_with_structure_chain\n",
|
||||
"from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from pydantic import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "5647c161",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"CustomResponseSchema(answer=\"He announced that American airspace will be closed off to all Russian flights, further isolating Russia and adding an additional squeeze on their economy. The Ruble has lost 30% of its value and the Russian stock market has lost 40% of its value. He also mentioned that Putin alone is to blame for Russia's reeling economy. The United States and its allies are providing support to Ukraine in their fight for freedom, including military, economic, and humanitarian assistance. The United States is giving more than $1 billion in direct assistance to Ukraine. He made it clear that American forces are not engaged and will not engage in conflict with Russian forces in Ukraine, but they are deployed to defend NATO allies in case Putin decides to keep moving west. He also mentioned that Putin's attack on Ukraine was premeditated and unprovoked, and that the West and NATO responded by building a coalition of freedom-loving nations to confront Putin. The free world is holding Putin accountable through powerful economic sanctions, cutting off Russia's largest banks from the international financial system, and preventing Russia's central bank from defending the Russian Ruble. The U.S. Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs.\", countries_referenced=['AMERICA', 'RUSSIA', 'UKRAINE'], sources=['4-pl', '5-pl', '2-pl', '3-pl'])"
|
||||
]
|
||||
},
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"class CustomResponseSchema(BaseModel):\n",
|
||||
" \"\"\"An answer to the question being asked, with sources.\"\"\"\n",
|
||||
"\n",
|
||||
" answer: str = Field(..., description=\"Answer to the question that was asked\")\n",
|
||||
" countries_referenced: List[str] = Field(\n",
|
||||
" ..., description=\"All of the countries mentioned in the sources\"\n",
|
||||
" )\n",
|
||||
" sources: List[str] = Field(\n",
|
||||
" ..., description=\"List of sources used to answer the question\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt_messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=(\n",
|
||||
" \"You are a world class algorithm to answer \"\n",
|
||||
" \"questions in a specific format.\"\n",
|
||||
" )\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"Answer question using the following context\"),\n",
|
||||
" HumanMessagePromptTemplate.from_template(\"{context}\"),\n",
|
||||
" HumanMessagePromptTemplate.from_template(\"Question: {question}\"),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Tips: Make sure to answer in the correct format. Return all of the countries mentioned in the sources in uppercase characters.\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"chain_prompt = ChatPromptTemplate(messages=prompt_messages)\n",
|
||||
"\n",
|
||||
"qa_chain_pydantic = create_qa_with_structure_chain(\n",
|
||||
" llm, CustomResponseSchema, output_parser=\"pydantic\", prompt=chain_prompt\n",
|
||||
")\n",
|
||||
"final_qa_chain_pydantic = StuffDocumentsChain(\n",
|
||||
" llm_chain=qa_chain_pydantic,\n",
|
||||
" document_variable_name=\"context\",\n",
|
||||
" document_prompt=doc_prompt,\n",
|
||||
")\n",
|
||||
"retrieval_qa_pydantic = RetrievalQA(\n",
|
||||
" retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic\n",
|
||||
")\n",
|
||||
"query = \"What did he say about russia\"\n",
|
||||
"retrieval_qa_pydantic.run(query)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
506
cookbook/openai_v1_cookbook.ipynb
Normal file
@@ -0,0 +1,506 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f970f757-ec76-4bf0-90cd-a2fb68b945e3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Exploring OpenAI V1 functionality\n",
|
||||
"\n",
|
||||
"On 11.06.23 OpenAI released a number of new features, and along with it bumped their Python SDK to 1.0.0. This notebook shows off the new features and how to use them with LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ee897729-263a-4073-898f-bb4cf01ed829",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# need openai>=1.1.0, langchain>=0.0.335, langchain-experimental>=0.0.39\n",
|
||||
"!pip install -U openai langchain langchain-experimental"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c3e067ce-7a43-47a7-bc89-41f1de4cf136",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.schema.messages import HumanMessage, SystemMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fa7e7e95-90a1-4f73-98fe-10c4b4e0951b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## [Vision](https://platform.openai.com/docs/guides/vision)\n",
|
||||
"\n",
|
||||
"OpenAI released multi-modal models, which can take a sequence of text and images as input."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "1c8c3965-d3c9-4186-b5f3-5e67855ef916",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='The image appears to be a diagram representing the architecture or components of a software system or framework related to language processing, possibly named LangChain or associated with a project or product called LangChain, based on the prominent appearance of that term. The diagram is organized into several layers or aspects, each containing various elements or modules:\\n\\n1. **Protocol**: This may be the foundational layer, which includes \"LCEL\" and terms like parallelization, fallbacks, tracing, batching, streaming, async, and composition. These seem related to communication and execution protocols for the system.\\n\\n2. **Integrations Components**: This layer includes \"Model I/O\" with elements such as the model, output parser, prompt, and example selector. It also has a \"Retrieval\" section with a document loader, retriever, embedding model, vector store, and text splitter. Lastly, there\\'s an \"Agent Tooling\" section. These components likely deal with the interaction with external data, models, and tools.\\n\\n3. **Application**: The application layer features \"LangChain\" with chains, agents, agent executors, and common application logic. This suggests that the system uses a modular approach with chains and agents to process language tasks.\\n\\n4. **Deployment**: This contains \"Lang')"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatOpenAI(model=\"gpt-4-vision-preview\", max_tokens=256)\n",
|
||||
"chat.invoke(\n",
|
||||
" [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"What is this image showing\"},\n",
|
||||
" {\n",
|
||||
" \"type\": \"image_url\",\n",
|
||||
" \"image_url\": {\n",
|
||||
" \"url\": \"https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png\",\n",
|
||||
" \"detail\": \"auto\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" ]\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
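The example above passes a publicly hosted image URL. The same message format also accepts base64 data URLs, which is handy for local files. A minimal sketch, where `diagram.png` is a placeholder filename:

```python
# Encode a local image as a base64 data URL and send it to the vision
# model using the same content format as the cell above.
import base64

from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage

with open("diagram.png", "rb") as f:  # placeholder local file
    b64 = base64.b64encode(f.read()).decode("utf-8")

chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
    [
        HumanMessage(
            content=[
                {"type": "text", "text": "What is this image showing"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{b64}"},
                },
            ]
        )
    ]
)
```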
{
|
||||
"cell_type": "markdown",
|
||||
"id": "210f8248-fcf3-4052-a4a3-0684e08f8785",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## [OpenAI assistants](https://platform.openai.com/docs/assistants/overview)\n",
|
||||
"\n",
|
||||
"> The Assistants API allows you to build AI assistants within your own applications. An Assistant has instructions and can leverage models, tools, and knowledge to respond to user queries. The Assistants API currently supports three types of tools: Code Interpreter, Retrieval, and Function calling\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"You can interact with OpenAI Assistants using OpenAI tools or custom tools. When using exclusively OpenAI tools, you can just invoke the assistant directly and get final answers. When using custom tools, you can run the assistant and tool execution loop using the built-in AgentExecutor or easily write your own executor.\n",
|
||||
"\n",
|
||||
"Below we show the different ways to interact with Assistants. As a simple example, let's build a math tutor that can write and run code."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "318da28d-4cec-42ab-ae3e-76d95bb34fa5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using only OpenAI tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a9064bbe-d9f7-4a29-a7b3-73933b3197e7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents.openai_assistant import OpenAIAssistantRunnable"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "7a20a008-49ac-46d2-aa26-b270118af5ea",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[ThreadMessage(id='msg_g9OJv0rpPgnc3mHmocFv7OVd', assistant_id='asst_hTwZeNMMphxzSOqJ01uBMsJI', content=[MessageContentText(text=Text(annotations=[], value='The result of \\\\(10 - 4^{2.7}\\\\) is approximately \\\\(-32.224\\\\).'), type='text')], created_at=1699460600, file_ids=[], metadata={}, object='thread.message', role='assistant', run_id='run_nBIT7SiAwtUfSCTrQNSPLOfe', thread_id='thread_14n4GgXwxgNL0s30WJW5F6p0')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interpreter_assistant = OpenAIAssistantRunnable.create_assistant(\n",
|
||||
" name=\"langchain assistant\",\n",
|
||||
" instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n",
|
||||
" tools=[{\"type\": \"code_interpreter\"}],\n",
|
||||
" model=\"gpt-4-1106-preview\",\n",
|
||||
")\n",
|
||||
"output = interpreter_assistant.invoke({\"content\": \"What's 10 - 4 raised to the 2.7\"})\n",
|
||||
"output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a8ddd181-ac63-4ab6-a40d-a236120379c1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### As a LangChain agent with arbitrary tools\n",
|
||||
"\n",
|
||||
"Now let's recreate this functionality using our own tools. For this example we'll use the [E2B sandbox runtime tool](https://e2b.dev/docs?ref=landing-page-get-started)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ee4cc355-f2d6-4c51-bcf7-f502868357d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install e2b duckduckgo-search"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "48681ac7-b267-48d4-972c-8a7df8393a21",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool\n",
|
||||
"\n",
|
||||
"tools = [E2BDataAnalysisTool(api_key=\"...\"), DuckDuckGoSearchRun()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "1c01dd79-dd3e-4509-a2e2-009a7f99f16a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = OpenAIAssistantRunnable.create_assistant(\n",
|
||||
" name=\"langchain assistant e2b tool\",\n",
|
||||
" instructions=\"You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.\",\n",
|
||||
" tools=tools,\n",
|
||||
" model=\"gpt-4-1106-preview\",\n",
|
||||
" as_agent=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1ac71d8b-4b4b-4f98-b826-6b3c57a34166",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Using AgentExecutor"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "1f137f94-801f-4766-9ff5-2de9df5e8079",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'content': \"What's the weather in SF today divided by 2.7\",\n",
|
||||
" 'output': \"The weather in San Francisco today is reported to have temperatures as high as 66 °F. To get the temperature divided by 2.7, we will calculate that:\\n\\n66 °F / 2.7 = 24.44 °F\\n\\nSo, when the high temperature of 66 °F is divided by 2.7, the result is approximately 24.44 °F. Please note that this doesn't have a meteorological meaning; it's purely a mathematical operation based on the given temperature.\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor\n",
|
||||
"\n",
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
|
||||
"agent_executor.invoke({\"content\": \"What's the weather in SF today divided by 2.7\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2d0a0b1d-c1b3-4b50-9dce-1189b51a6206",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Custom execution"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "c0475fa7-b6c1-4331-b8e2-55407466c724",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = OpenAIAssistantRunnable.create_assistant(\n",
|
||||
" name=\"langchain assistant e2b tool\",\n",
|
||||
" instructions=\"You are a personal math tutor. Write and run code to answer math questions.\",\n",
|
||||
" tools=tools,\n",
|
||||
" model=\"gpt-4-1106-preview\",\n",
|
||||
" as_agent=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "b76cb669-6aba-4827-868f-00aa960026f2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema.agent import AgentFinish\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def execute_agent(agent, tools, input):\n",
|
||||
" tool_map = {tool.name: tool for tool in tools}\n",
|
||||
" response = agent.invoke(input)\n",
|
||||
" while not isinstance(response, AgentFinish):\n",
|
||||
" tool_outputs = []\n",
|
||||
" for action in response:\n",
|
||||
" tool_output = tool_map[action.tool].invoke(action.tool_input)\n",
|
||||
" print(action.tool, action.tool_input, tool_output, end=\"\\n\\n\")\n",
|
||||
" tool_outputs.append(\n",
|
||||
" {\"output\": tool_output, \"tool_call_id\": action.tool_call_id}\n",
|
||||
" )\n",
|
||||
" response = agent.invoke(\n",
|
||||
" {\n",
|
||||
" \"tool_outputs\": tool_outputs,\n",
|
||||
" \"run_id\": action.run_id,\n",
|
||||
" \"thread_id\": action.thread_id,\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" return response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "7946116a-b82f-492e-835e-ca958a8949a5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"e2b_data_analysis {'python_code': 'print(10 - 4 ** 2.7)'} {\"stdout\": \"-32.22425314473263\", \"stderr\": \"\", \"artifacts\": []}\n",
|
||||
"\n",
|
||||
"\\( 10 - 4^{2.7} \\) is approximately \\(-32.22425314473263\\).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"response = execute_agent(agent, tools, {\"content\": \"What's 10 - 4 raised to the 2.7\"})\n",
|
||||
"print(response.return_values[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "f2744a56-9f4f-4899-827a-fa55821c318c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"e2b_data_analysis {'python_code': 'result = 10 - 4 ** 2.7\\nprint(result + 17.241)'} {\"stdout\": \"-14.983253144732629\", \"stderr\": \"\", \"artifacts\": []}\n",
|
||||
"\n",
|
||||
"When you add \\( 17.241 \\) to \\( 10 - 4^{2.7} \\), the result is approximately \\( -14.98325314473263 \\).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"next_response = execute_agent(\n",
|
||||
" agent, tools, {\"content\": \"now add 17.241\", \"thread_id\": response.thread_id}\n",
|
||||
")\n",
|
||||
"print(next_response.return_values[\"output\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "71c34763-d1e7-4b9a-a9d7-3e4cc0dfc2c4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## [JSON mode](https://platform.openai.com/docs/guides/text-generation/json-mode)\n",
|
||||
"\n",
|
||||
"Constrain the model to only generate valid JSON. Note that you must include a system message with instructions to use JSON for this mode to work.\n",
|
||||
"\n",
|
||||
"Only works with certain models. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "db6072c4-f3f3-415d-872b-71ea9f3c02bb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\").bind(\n",
|
||||
" response_format={\"type\": \"json_object\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"output = chat.invoke(\n",
|
||||
" [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Google was founded in the USA, while Deepmind was founded in the UK\"\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"print(output.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "08e00ccf-b991-4249-846b-9500a0ccbfa0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"json.loads(output.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aa9a94d9-4319-4ab7-a979-c475ce6b5f50",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## [System fingerprint](https://platform.openai.com/docs/guides/text-generation/reproducible-outputs)\n",
|
||||
"\n",
|
||||
"OpenAI sometimes changes model configurations in a way that impacts outputs. Whenever this happens, the system_fingerprint associated with a generation will change."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1281883c-bf8f-4665-89cd-4f33ccde69ab",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")\n",
|
||||
"output = chat.generate(\n",
|
||||
" [\n",
|
||||
" [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Google was founded in the USA, while Deepmind was founded in the UK\"\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"print(output.llm_output)"
|
||||
]
|
||||
},
|
||||
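To detect such a change programmatically, you can compare fingerprints across generations. A small sketch, assuming the fingerprint is surfaced under the `system_fingerprint` key of `llm_output` as in the printout above:

```python
# Compare fingerprints from two generations; a difference suggests
# OpenAI changed the backend configuration between the calls.
fp1 = output.llm_output.get("system_fingerprint")

output2 = chat.generate([[HumanMessage(content="Hello")]])
fp2 = output2.llm_output.get("system_fingerprint")

if fp1 != fp2:
    print(f"Backend configuration changed: {fp1} -> {fp2}")
else:
    print(f"Same backend configuration: {fp1}")
```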
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aa6565be-985d-4127-848e-c3bca9d7b434",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Breaking changes to Azure classes\n",
|
||||
"\n",
|
||||
"OpenAI V1 rewrote their clients and separated Azure and OpenAI clients. This has led to some changes in LangChain interfaces when using OpenAI V1.\n",
|
||||
"\n",
|
||||
"BREAKING CHANGES:\n",
|
||||
"- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. `OpenAIEmbeddings` continue to work when using Azure with `openai<1`.\n",
|
||||
"```python\n",
|
||||
"from langchain.embeddings import AzureOpenAIEmbeddings\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"RECOMMENDED CHANGES:\n",
|
||||
"- When using `AzureChatOpenAI` or `AzureOpenAI`, if passing in an Azure endpoint (eg https://example-resource.azure.openai.com/) this should be specified via the `azure_endpoint` parameter or the `AZURE_OPENAI_ENDPOINT`. We're maintaining backwards compatibility for now with specifying this via `openai_api_base`/`base_url` or env var `OPENAI_API_BASE` but this shouldn't be relied upon.\n",
|
||||
"- When using Azure chat or embedding models, pass in API keys either via `openai_api_key` parameter or `AZURE_OPENAI_API_KEY` parameter. We're maintaining backwards compatibility for now with specifying this via `OPENAI_API_KEY` but this shouldn't be relied upon."
|
||||
]
|
||||
},
|
||||
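Putting the recommended changes together, here is a minimal sketch of configuring the Azure classes under `openai>=1`. The endpoint, key, and API version below are placeholders; deployment settings depend on your Azure resource:

```python
import os

from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import AzureOpenAIEmbeddings

os.environ["AZURE_OPENAI_API_KEY"] = "..."  # placeholder key

# Endpoint passed explicitly via the new `azure_endpoint` parameter.
chat = AzureChatOpenAI(
    azure_endpoint="https://example-resource.azure.openai.com/",  # placeholder
    openai_api_version="2023-05-15",  # example version string
)
embeddings = AzureOpenAIEmbeddings(
    azure_endpoint="https://example-resource.azure.openai.com/",  # placeholder
    openai_api_version="2023-05-15",  # example version string
)
```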
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49944887-3972-497e-8da2-6d32d44345a9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"Use tools for parallel function calling."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "916292d8-0f89-40a6-af1c-5a1122327de8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[GetCurrentWeather(location='New York, NY', unit='fahrenheit'),\n",
|
||||
" GetCurrentWeather(location='Los Angeles, CA', unit='fahrenheit'),\n",
|
||||
" GetCurrentWeather(location='San Francisco, CA', unit='fahrenheit')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import Literal\n",
|
||||
"\n",
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.pydantic_v1 import BaseModel, Field\n",
|
||||
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class GetCurrentWeather(BaseModel):\n",
|
||||
" \"\"\"Get the current weather in a location.\"\"\"\n",
|
||||
"\n",
|
||||
" location: str = Field(description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
" unit: Literal[\"celsius\", \"fahrenheit\"] = Field(\n",
|
||||
" default=\"fahrenheit\", description=\"The temperature unit, default to fahrenheit\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", \"You are a helpful assistant\"), (\"user\", \"{input}\")]\n",
|
||||
")\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\").bind(\n",
|
||||
" tools=[convert_pydantic_to_openai_tool(GetCurrentWeather)]\n",
|
||||
")\n",
|
||||
"chain = prompt | model | PydanticToolsParser(tools=[GetCurrentWeather])\n",
|
||||
"\n",
|
||||
"chain.invoke({\"input\": \"what's the weather in NYC, LA, and SF\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -45,14 +45,14 @@
|
||||
"source": [
|
||||
"import collections\n",
|
||||
"import inspect\n",
|
||||
"import tenacity\n",
|
||||
"\n",
|
||||
"import tenacity\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain.output_parsers import RegexParser"
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -71,16 +71,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class GymnasiumAgent():\n",
|
||||
"class GymnasiumAgent:\n",
|
||||
" @classmethod\n",
|
||||
" def get_docs(cls, env):\n",
|
||||
" return env.unwrapped.__doc__\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def __init__(self, model, env):\n",
|
||||
" self.model = model\n",
|
||||
" self.env = env\n",
|
||||
" self.docs = self.get_docs(env)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" self.instructions = \"\"\"\n",
|
||||
"Your goal is to maximize your return, i.e. the sum of the rewards you receive.\n",
|
||||
"I will give you an observation, reward, terminiation flag, truncation flag, and the return so far, formatted as:\n",
|
||||
@@ -99,26 +99,25 @@
|
||||
"Do nothing else but return the action.\n",
|
||||
"\"\"\"\n",
|
||||
" self.action_parser = RegexParser(\n",
|
||||
" regex=r\"Action: (.*)\", \n",
|
||||
" output_keys=['action'], \n",
|
||||
" default_output_key='action')\n",
|
||||
" \n",
|
||||
" regex=r\"Action: (.*)\", output_keys=[\"action\"], default_output_key=\"action\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" self.message_history = []\n",
|
||||
" self.ret = 0\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def random_action(self):\n",
|
||||
" action = self.env.action_space.sample()\n",
|
||||
" return action\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\n",
|
||||
" SystemMessage(content=self.docs),\n",
|
||||
" SystemMessage(content=self.instructions),\n",
|
||||
" ]\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def observe(self, obs, rew=0, term=False, trunc=False, info=None):\n",
|
||||
" self.ret += rew\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" obs_message = f\"\"\"\n",
|
||||
"Observation: {obs}\n",
|
||||
"Reward: {rew}\n",
|
||||
@@ -128,24 +127,26 @@
|
||||
" \"\"\"\n",
|
||||
" self.message_history.append(HumanMessage(content=obs_message))\n",
|
||||
" return obs_message\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)['action'])\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def act(self):\n",
|
||||
" try:\n",
|
||||
" for attempt in tenacity.Retrying(\n",
|
||||
" stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"),\n",
|
||||
" ):\n",
|
||||
" stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(\n",
|
||||
" f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"\n",
|
||||
" ),\n",
|
||||
" ):\n",
|
||||
" with attempt:\n",
|
||||
" action = self._act()\n",
|
||||
" except tenacity.RetryError as e:\n",
|
||||
" except tenacity.RetryError:\n",
|
||||
" action = self.random_action()\n",
|
||||
" return action"
|
||||
]
|
||||
@@ -174,13 +175,14 @@
|
||||
" for agent_name in env.agent_iter():\n",
|
||||
" observation, reward, termination, truncation, info = env.last()\n",
|
||||
" obs_message = agents[agent_name].observe(\n",
|
||||
" observation, reward, termination, truncation, info)\n",
|
||||
" observation, reward, termination, truncation, info\n",
|
||||
" )\n",
|
||||
" print(obs_message)\n",
|
||||
" if termination or truncation:\n",
|
||||
" action = None\n",
|
||||
" else:\n",
|
||||
" action = agents[agent_name].act()\n",
|
||||
" print(f'Action: {action}')\n",
|
||||
" print(f\"Action: {action}\")\n",
|
||||
" env.step(action)\n",
|
||||
" env.close()"
|
||||
]
|
||||
@@ -208,11 +210,11 @@
|
||||
" @classmethod\n",
|
||||
" def get_docs(cls, env):\n",
|
||||
" return inspect.getmodule(env.unwrapped).__doc__\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def __init__(self, name, model, env):\n",
|
||||
" super().__init__(model, env)\n",
|
||||
" self.name = name\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def random_action(self):\n",
|
||||
" action = self.env.action_space(self.name).sample()\n",
|
||||
" return action"
|
||||
@@ -306,8 +308,12 @@
|
||||
],
|
||||
"source": [
|
||||
"from pettingzoo.classic import rps_v2\n",
|
||||
"\n",
|
||||
"env = rps_v2.env(max_cycles=3, render_mode=\"human\")\n",
|
||||
"agents = {name: PettingZooAgent(name=name, model=ChatOpenAI(temperature=1), env=env) for name in env.possible_agents}\n",
|
||||
"agents = {\n",
|
||||
" name: PettingZooAgent(name=name, model=ChatOpenAI(temperature=1), env=env)\n",
|
||||
" for name in env.possible_agents\n",
|
||||
"}\n",
|
||||
"main(agents, env)"
|
||||
]
|
||||
},
|
||||
@@ -332,22 +338,22 @@
|
||||
" def __init__(self, name, model, env):\n",
|
||||
" super().__init__(name, model, env)\n",
|
||||
" self.obs_buffer = collections.deque(maxlen=1)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def random_action(self):\n",
|
||||
" obs = self.obs_buffer[-1]\n",
|
||||
" action = self.env.action_space(self.name).sample(obs[\"action_mask\"])\n",
|
||||
" return action\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\n",
|
||||
" SystemMessage(content=self.docs),\n",
|
||||
" SystemMessage(content=self.instructions),\n",
|
||||
" ]\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def observe(self, obs, rew=0, term=False, trunc=False, info=None):\n",
|
||||
" self.obs_buffer.append(obs)\n",
|
||||
" return super().observe(obs, rew, term, trunc, info)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" valid_action_instruction = \"Generate a valid action given by the indices of the `action_mask` that are not 0, according to the action formatting rules.\"\n",
|
||||
" self.message_history.append(HumanMessage(content=valid_action_instruction))\n",
|
||||
@@ -603,8 +609,12 @@
|
||||
],
|
||||
"source": [
|
||||
"from pettingzoo.classic import tictactoe_v3\n",
|
||||
"\n",
|
||||
"env = tictactoe_v3.env(render_mode=\"human\")\n",
|
||||
"agents = {name: ActionMaskAgent(name=name, model=ChatOpenAI(temperature=0.2), env=env) for name in env.possible_agents}\n",
|
||||
"agents = {\n",
|
||||
" name: ActionMaskAgent(name=name, model=ChatOpenAI(temperature=0.2), env=env)\n",
|
||||
" for name in env.possible_agents\n",
|
||||
"}\n",
|
||||
"main(agents, env)"
|
||||
]
|
||||
},
|
||||
@@ -788,8 +798,12 @@
|
||||
],
|
||||
"source": [
|
||||
"from pettingzoo.classic import texas_holdem_no_limit_v6\n",
|
||||
"\n",
|
||||
"env = texas_holdem_no_limit_v6.env(num_players=4, render_mode=\"human\")\n",
|
||||
"agents = {name: ActionMaskAgent(name=name, model=ChatOpenAI(temperature=0.2), env=env) for name in env.possible_agents}\n",
|
||||
"agents = {\n",
|
||||
" name: ActionMaskAgent(name=name, model=ChatOpenAI(temperature=0.2), env=env)\n",
|
||||
" for name in env.possible_agents\n",
|
||||
"}\n",
|
||||
"main(agents, env)"
|
||||
]
|
||||
}
|
||||
258
cookbook/plan_and_execute_agent.ipynb
Normal file
@@ -0,0 +1,258 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ddfef23-3c74-444c-81dd-6753722997fa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Plan-and-execute\n",
|
||||
"\n",
|
||||
"Plan-and-execute agents accomplish an objective by first planning what to do, then executing the sub tasks. This idea is largely inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) and then the [\"Plan-and-Solve\" paper](https://arxiv.org/abs/2305.04091).\n",
|
||||
"\n",
|
||||
"The planning is almost always done by an LLM.\n",
|
||||
"\n",
|
||||
"The execution is usually done by a separate agent (equipped with tools)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a7ecb22a-7009-48ec-b14e-f0fa5aac1cd0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5fbbd4ee-bfe8-4a25-afe4-8d1a552a3d2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents.tools import Tool\n",
|
||||
"from langchain.chains import LLMMathChain\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
|
||||
"from langchain_experimental.plan_and_execute import (\n",
|
||||
" PlanAndExecute,\n",
|
||||
" load_agent_executor,\n",
|
||||
" load_chat_planner,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e0e995e5-af9d-4988-bcd0-467a2a2e18cd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "1d789f4e-54e3-4602-891a-f076e0ab9594",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = DuckDuckGoSearchAPIWrapper()\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"Calculator\",\n",
|
||||
" func=llm_math_chain.run,\n",
|
||||
" description=\"useful for when you need to answer questions about math\",\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "04dc6452-a07f-49f9-be12-95be1e2afccc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Planner, Executor, and Agent\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d8f49c03-c804-458b-8122-c92b26c7b7dd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = ChatOpenAI(temperature=0)\n",
|
||||
"planner = load_chat_planner(model)\n",
|
||||
"executor = load_agent_executor(model, tools, verbose=True)\n",
|
||||
"agent = PlanAndExecute(planner=planner, executor=executor)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "78ba03dd-0322-4927-b58d-a7e2027fdbb3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a57f7efe-7866-47a7-bce5-9c7b1047964e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction:\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"current prime minister of the UK\"\n",
|
||||
"}\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"current prime minister of the UK\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mBottom right: Rishi Sunak is the current prime minister and the first non-white prime minister. The prime minister of the United Kingdom is the principal minister of the crown of His Majesty's Government, and the head of the British Cabinet. 3 min. British Prime Minister Rishi Sunak asserted his stance on gender identity in a speech Wednesday, stating it was \"common sense\" that \"a man is a man and a woman is a woman\" — a ... The former chancellor Rishi Sunak is the UK's new prime minister. Here's what you need to know about him. He won after running for the second time this year He lost to Liz Truss in September,... Isaeli Prime Minister Benjamin Netanyahu spoke with US President Joe Biden on Wednesday, the prime minister's office said in a statement. Netanyahu \"thanked the President for the powerful words of ... By Yasmeen Serhan/London Updated: October 25, 2022 12:56 PM EDT | Originally published: October 24, 2022 9:17 AM EDT S top me if you've heard this one before: After a tumultuous period of political...\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe search results indicate that Rishi Sunak is the current prime minister of the UK. However, it's important to note that this information may not be accurate or up to date.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"current age of the prime minister of the UK\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mHow old is Rishi Sunak? Mr Sunak was born on 12 May, 1980, making him 42 years old. He first became an MP in 2015, aged 34, and has served the constituency of Richmond in Yorkshire ever since. He... Prime Ministers' ages when they took office From oldest to youngest, the ages of the PMs were as follows: Winston Churchill - 65 years old James Callaghan - 64 years old Clement Attlee - 62 years... Anna Kaufman USA TODAY Just a few days after Liz Truss resigned as prime minister, the UK has a new prime minister. Truss, who lasted a mere 45 days in office, will be replaced by Rishi... Advertisement Rishi Sunak is the youngest British prime minister of modern times. Mr. Sunak is 42 and started out in Parliament in 2015. Rishi Sunak was appointed as chancellor of the Exchequer... The first prime minister of the current United Kingdom of Great Britain and Northern Ireland upon its effective creation in 1922 (when 26 Irish counties seceded and created the Irish Free State) was Bonar Law, [10] although the country was not renamed officially until 1927, when Stanley Baldwin was the serving prime minister. [11]\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mBased on the search results, it seems that Rishi Sunak is the current prime minister of the UK. However, I couldn't find any specific information about his age. Would you like me to search again for the current age of the prime minister?\n",
|
||||
"\n",
|
||||
"Action:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Search\",\n",
|
||||
" \"action_input\": \"age of Rishi Sunak\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mRishi Sunak is 42 years old, making him the youngest person to hold the office of prime minister in modern times. How tall is Rishi Sunak? How Old Is Rishi Sunak? Rishi Sunak was born on May 12, 1980, in Southampton, England. Parents and Nationality Sunak's parents were born to Indian-origin families in East Africa before... Born on May 12, 1980, Rishi is currently 42 years old. He has been a member of parliament since 2015 where he was an MP for Richmond and has served in roles including Chief Secretary to the Treasury and the Chancellor of Exchequer while Boris Johnson was PM. Family Murty, 42, is the daughter of the Indian billionaire NR Narayana Murthy, often described as the Bill Gates of India, who founded the software company Infosys. According to reports, his... Sunak became the first non-White person to lead the country and, at age 42, the youngest to take on the role in more than a century. Like most politicians, Sunak is revered by some and...\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mBased on the search results, Rishi Sunak is currently 42 years old. He was born on May 12, 1980.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: To calculate the age raised to the power of 0.43, I can use the calculator tool.\n",
|
||||
"\n",
|
||||
"Action:\n",
|
||||
"```json\n",
|
||||
"{\n",
|
||||
" \"action\": \"Calculator\",\n",
|
||||
" \"action_input\": \"42^0.43\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
||||
"42^0.43\u001b[32;1m\u001b[1;3m```text\n",
|
||||
"42**0.43\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"42**0.43\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m4.9888126515157\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3mAnswer: 4.9888126515157\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mThe age raised to the power of 0.43 is approximately 4.9888126515157.\n",
|
||||
"\n",
|
||||
"Final Answer:\n",
|
||||
"```json\n",
|
||||
"{\n",
|
||||
" \"action\": \"Final Answer\",\n",
|
||||
" \"action_input\": \"The age raised to the power of 0.43 is approximately 4.9888126515157.\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Final Answer\",\n",
|
||||
" \"action_input\": \"The current prime minister of the UK is Rishi Sunak. His age raised to the power of 0.43 is approximately 4.9888126515157.\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The current prime minister of the UK is Rishi Sunak. His age raised to the power of 0.43 is approximately 4.9888126515157.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\n",
|
||||
" \"Who is the current prime minister of the UK? What is their current age raised to the 0.43 power?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0ef78a07-1a2a-46f8-9bc9-ae45f9bd706c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
156
cookbook/press_releases.ipynb
Normal file
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "62ee82e4-2ad8-498b-8438-fac388afe1a2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Press Releases Data\n",
|
||||
"=\n",
|
||||
"\n",
|
||||
"Press Releases data powered by [Kay.ai](https://kay.ai).\n",
|
||||
"\n",
|
||||
">Press releases are used by companies to announce something noteworthy, including product launches, financial performance reports, partnerships, and other significant news. They are widely used by analysts to track corporate strategy, operational updates and financial performance.\n",
|
||||
"Kay.ai obtains press releases of all US public companies from a variety of sources, which include the company's official press room and partnerships with various data API providers. \n",
|
||||
"This data is updated till Sept 30th for free access, if you want to access the real-time feed, reach out to us at hello@kay.ai or [tweet at us](https://twitter.com/vishalrohra_)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8183d85d-365f-4672-a963-52b533547de0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Setup\n",
|
||||
"=\n",
|
||||
"\n",
|
||||
"First you will need to install the `kay` package. You will also need an API key: you can get one for free at [https://kay.ai](https://kay.ai/). Once you have an API key, you must set it as an environment variable `KAY_API_KEY`.\n",
|
||||
"\n",
|
||||
"In this example we're going to use the `KayAiRetriever`. Take a look at the [kay notebook](/docs/integrations/retrievers/kay) for more detailed information for the parmeters that it accepts."
|
||||
]
|
||||
},
|
||||
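The setup above mentions installing the `kay` package but the notebook jumps straight to API keys; the missing install step would look like this (a sketch, assuming a pip-based environment):

```python
!pip install kay
```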
{
|
||||
"cell_type": "markdown",
|
||||
"id": "02ec21c7-49fe-4844-b58a-bf064ad40b2a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Examples\n",
|
||||
"="
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "bf0395f7-6ebe-4136-8b0d-00b9dea3becd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n",
|
||||
" ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Setup API keys for Kay and OpenAI\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"KAY_API_KEY = getpass()\n",
|
||||
"OPENAI_API_KEY = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f7fcaf70-29a4-444b-8f07-9784f808c300",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"KAY_API_KEY\"] = KAY_API_KEY\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "ac00bf93-3635-4ffe-b9a6-a8b4f35c0c85",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.retrievers import KayAiRetriever\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
|
||||
"retriever = KayAiRetriever.create(\n",
|
||||
" dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6\n",
|
||||
")\n",
|
||||
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8d9d927c-35b2-4a7b-8ea7-4d0350797941",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"-> **Question**: How is the healthcare industry adopting generative AI tools? \n",
|
||||
"\n",
|
||||
"**Answer**: The healthcare industry is adopting generative AI tools to improve various aspects of patient care and administrative tasks. Companies like HCA Healthcare Inc, Amazon Com Inc, and Mayo Clinic have collaborated with technology providers like Google Cloud, AWS, and Microsoft to implement generative AI solutions.\n",
|
||||
"\n",
|
||||
"HCA Healthcare is testing a nurse handoff tool that generates draft reports quickly and accurately, which nurses have shown interest in using. They are also exploring the use of Google's medically-tuned Med-PaLM 2 LLM to support caregivers in asking complex medical questions.\n",
|
||||
"\n",
|
||||
"Amazon Web Services (AWS) has introduced AWS HealthScribe, a generative AI-powered service that automatically creates clinical documentation. However, integrating multiple AI systems into a cohesive solution requires significant engineering resources, including access to AI experts, healthcare data, and compute capacity.\n",
|
||||
"\n",
|
||||
"Mayo Clinic is among the first healthcare organizations to deploy Microsoft 365 Copilot, a generative AI service that combines large language models with organizational data from Microsoft 365. This tool has the potential to automate tasks like form-filling, relieving administrative burdens on healthcare providers and allowing them to focus more on patient care.\n",
|
||||
"\n",
|
||||
"Overall, the healthcare industry is recognizing the potential benefits of generative AI tools in improving efficiency, automating tasks, and enhancing patient care. \n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# More sample questions in the Playground on https://kay.ai\n",
|
||||
"questions = [\n",
|
||||
" \"How is the healthcare industry adopting generative AI tools?\",\n",
|
||||
" # \"What are some recent challenges faced by the renewable energy sector?\",\n",
|
||||
"]\n",
|
||||
"chat_history = []\n",
|
||||
"\n",
|
||||
"for question in questions:\n",
|
||||
" result = qa({\"question\": question, \"chat_history\": chat_history})\n",
|
||||
" chat_history.append((question, result[\"answer\"]))\n",
|
||||
" print(f\"-> **Question**: {question} \\n\")\n",
|
||||
" print(f\"**Answer**: {result['answer']} \\n\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
292
cookbook/program_aided_language_model.ipynb
Normal file
@@ -0,0 +1,292 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "32e022a2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Program-aided language model (PAL) chain\n",
|
||||
"\n",
|
||||
"Implements Program-Aided Language Models, as in https://arxiv.org/pdf/2211.10435.pdf.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1370e40f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain_experimental.pal_chain import PALChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9a58e15e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0, max_tokens=512)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "095adc76",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Math Prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "beddcac7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pal_chain = PALChain.from_math_prompt(llm, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "e2eab9d4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3ef64b27",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mdef solution():\n",
|
||||
" \"\"\"Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?\"\"\"\n",
|
||||
" cindy_pets = 4\n",
|
||||
" marcia_pets = cindy_pets + 2\n",
|
||||
" jan_pets = marcia_pets * 3\n",
|
||||
" total_pets = cindy_pets + marcia_pets + jan_pets\n",
|
||||
" result = total_pets\n",
|
||||
" return result\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'28'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pal_chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0269d20a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Colored Objects"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "e524f81f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pal_chain = PALChain.from_colored_object_prompt(llm, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "03a237b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses. If I remove all the pairs of sunglasses from the desk, how many purple items remain on it?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "a84a4352",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
|
||||
"objects = []\n",
|
||||
"objects += [('booklet', 'blue')] * 2\n",
|
||||
"objects += [('booklet', 'purple')] * 2\n",
|
||||
"objects += [('sunglasses', 'yellow')] * 2\n",
|
||||
"\n",
|
||||
"# Remove all pairs of sunglasses\n",
|
||||
"objects = [object for object in objects if object[0] != 'sunglasses']\n",
|
||||
"\n",
|
||||
"# Count number of purple objects\n",
|
||||
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
|
||||
"answer = num_purple\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished PALChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'2'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pal_chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fc3d7f10",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Intermediate Steps\n",
|
||||
"You can also use the intermediate steps flag to return the code executed that generates the answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9d2d9c61",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pal_chain = PALChain.from_colored_object_prompt(\n",
|
||||
" llm, verbose=True, return_intermediate_steps=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "b29b971b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses. If I remove all the pairs of sunglasses from the desk, how many purple items remain on it?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "a2c40c28",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
|
||||
"objects = []\n",
|
||||
"objects += [('booklet', 'blue')] * 2\n",
|
||||
"objects += [('booklet', 'purple')] * 2\n",
|
||||
"objects += [('sunglasses', 'yellow')] * 2\n",
|
||||
"\n",
|
||||
"# Remove all pairs of sunglasses\n",
|
||||
"objects = [object for object in objects if object[0] != 'sunglasses']\n",
|
||||
"\n",
|
||||
"# Count number of purple objects\n",
|
||||
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
|
||||
"answer = num_purple\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result = pal_chain({\"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "efddd033",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"# Put objects into a list to record ordering\\nobjects = []\\nobjects += [('booklet', 'blue')] * 2\\nobjects += [('booklet', 'purple')] * 2\\nobjects += [('sunglasses', 'yellow')] * 2\\n\\n# Remove all pairs of sunglasses\\nobjects = [object for object in objects if object[0] != 'sunglasses']\\n\\n# Count number of purple objects\\nnum_purple = len([object for object in objects if object[1] == 'purple'])\\nanswer = num_purple\""
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"result[\"intermediate_steps\"]"
|
||||
]
|
||||
},
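{
"cell_type": "markdown",
"id": "1f3c9a2b",
"metadata": {},
"source": [
"The returned code is a plain string, so it can also be printed directly for readability (a trivial, hypothetical example):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e8d7c6a",
"metadata": {},
"outputs": [],
"source": [
"# Print the generated program with its original line breaks.\n",
"print(result[\"intermediate_steps\"])"
]
},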
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dfd88594",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
cookbook/qa_citations.ipynb (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9b5c258f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Citing retrieval sources\n",
|
||||
"\n",
|
||||
"This notebook shows how to use OpenAI functions ability to extract citations from text."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "eae4ca3e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.6.4) is available. It's recommended that you update to the latest version using `pip install -U deeplake`.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import create_citation_fuzzy_match_chain\n",
|
||||
"from langchain.chat_models import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2c6e62ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What did the author do during college?\"\n",
|
||||
"context = \"\"\"\n",
|
||||
"My name is Jason Liu, and I grew up in Toronto Canada but I was born in China.\n",
|
||||
"I went to an arts highschool but in university I studied Computational Mathematics and physics. \n",
|
||||
"As part of coop I worked at many companies including Stitchfix, Facebook.\n",
|
||||
"I also started the Data Science club at the University of Waterloo and I was the president of the club for 2 years.\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "078e0300",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "02cad6d0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_citation_fuzzy_match_chain(llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "e3c6e7ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = chain.run(question=question, context=context)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "6f7615f2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"question='What did the author do during college?' answer=[FactWithEvidence(fact='The author studied Computational Mathematics and physics in university.', substring_quote=['in university I studied Computational Mathematics and physics']), FactWithEvidence(fact='The author started the Data Science club at the University of Waterloo and was the president of the club for 2 years.', substring_quote=['started the Data Science club at the University of Waterloo', 'president of the club for 2 years'])]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "3be6f366",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def highlight(text, span):\n",
|
||||
" return (\n",
|
||||
" \"...\"\n",
|
||||
" + text[span[0] - 20 : span[0]]\n",
|
||||
" + \"*\"\n",
|
||||
" + \"\\033[91m\"\n",
|
||||
" + text[span[0] : span[1]]\n",
|
||||
" + \"\\033[0m\"\n",
|
||||
" + \"*\"\n",
|
||||
" + text[span[1] : span[1] + 20]\n",
|
||||
" + \"...\"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "636c4528",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Statement: The author studied Computational Mathematics and physics in university.\n",
|
||||
"Citation: ...arts highschool but *\u001b[91min university I studied Computational Mathematics and physics\u001b[0m*. \n",
|
||||
"As part of coop I...\n",
|
||||
"\n",
|
||||
"Statement: The author started the Data Science club at the University of Waterloo and was the president of the club for 2 years.\n",
|
||||
"Citation: ...x, Facebook.\n",
|
||||
"I also *\u001b[91mstarted the Data Science club at the University of Waterloo\u001b[0m* and I was the presi...\n",
|
||||
"Citation: ...erloo and I was the *\u001b[91mpresident of the club for 2 years\u001b[0m*.\n",
|
||||
"...\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for fact in result.answer:\n",
|
||||
" print(\"Statement:\", fact.fact)\n",
|
||||
" for span in fact.get_spans(context):\n",
|
||||
" print(\"Citation:\", highlight(context, span))\n",
|
||||
" print()"
|
||||
]
|
||||
},
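{
"cell_type": "markdown",
"id": "9c4d1e2f",
"metadata": {},
"source": [
"If structured output is needed downstream, each fact can also be serialized. A minimal sketch, assuming the standard pydantic v1 `.dict()` method is available on these models:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5b8a3f70",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical: serialize each fact (a pydantic model) for downstream use.\n",
"for fact in result.answer:\n",
"    print(fact.dict())"
]
},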
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8409cab0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
cookbook/qianfan_baidu_elasticesearch_RAG.ipynb (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RAG based on Qianfan and BES"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook is an implementation of Retrieval augmented generation (RAG) using Baidu Qianfan Platform combined with Baidu ElasricSearch, where the original data is located on BOS.\n",
|
||||
"## Baidu Qianfan\n",
|
||||
"Baidu AI Cloud Qianfan Platform is a one-stop large model development and service operation platform for enterprise developers. Qianfan not only provides including the model of Wenxin Yiyan (ERNIE-Bot) and the third-party open-source models, but also provides various AI development tools and the whole set of development environment, which facilitates customers to use and develop large model applications easily.\n",
|
||||
"\n",
|
||||
"## Baidu ElasticSearch\n",
|
||||
"[Baidu Cloud VectorSearch](https://cloud.baidu.com/doc/BES/index.html?from=productToDoc) is a fully managed, enterprise-level distributed search and analysis service which is 100% compatible to open source. Baidu Cloud VectorSearch provides low-cost, high-performance, and reliable retrieval and analysis platform level product services for structured/unstructured data. As a vector database , it supports multiple index types and similarity distance methods. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation and Setup\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install qianfan\n",
|
||||
"#!pip install bce-python-sdk\n",
|
||||
"#!pip install elasticsearch == 7.11.0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from baidubce.auth.bce_credentials import BceCredentials\n",
|
||||
"from baidubce.bce_client_configuration import BceClientConfiguration\n",
|
||||
"from langchain.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader\n",
|
||||
"from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n",
|
||||
"from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import BESVectorStore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Document loading"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bos_host = \"your bos eddpoint\"\n",
|
||||
"access_key_id = \"your bos access ak\"\n",
|
||||
"secret_access_key = \"your bos access sk\"\n",
|
||||
"\n",
|
||||
"# create BceClientConfiguration\n",
|
||||
"config = BceClientConfiguration(\n",
|
||||
" credentials=BceCredentials(access_key_id, secret_access_key), endpoint=bos_host\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"loader = BaiduBOSDirectoryLoader(conf=config, bucket=\"llm-test\", prefix=\"llm/\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)\n",
|
||||
"split_docs = text_splitter.split_documents(documents)"
|
||||
]
|
||||
},
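{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (a hypothetical step; the actual counts depend on the contents of your BOS bucket), we can inspect how many chunks the splitter produced:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sanity check on the split output.\n",
"print(f\"Loaded {len(documents)} documents, split into {len(split_docs)} chunks\")"
]
},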
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Embedding and VectorStore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = HuggingFaceEmbeddings(model_name=\"shibing624/text2vec-base-chinese\")\n",
|
||||
"embeddings.client = sentence_transformers.SentenceTransformer(embeddings.model_name)\n",
|
||||
"\n",
|
||||
"db = BESVectorStore.from_documents(\n",
|
||||
" documents=split_docs,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" bes_url=\"your bes url\",\n",
|
||||
" index_name=\"test-index\",\n",
|
||||
" vector_query_field=\"vector\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"db.client.indices.refresh(index=\"test-index\")\n",
|
||||
"retriever = db.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## QA Retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = QianfanLLMEndpoint(\n",
|
||||
" model=\"ERNIE-Bot\",\n",
|
||||
" qianfan_ak=\"your qianfan ak\",\n",
|
||||
" qianfan_sk=\"your qianfan sk\",\n",
|
||||
" streaming=True,\n",
|
||||
")\n",
|
||||
"qa = RetrievalQA.from_chain_type(\n",
|
||||
" llm=llm, chain_type=\"refine\", retriever=retriever, return_source_documents=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query = \"什么是张量?\"\n",
|
||||
"print(qa.run(query))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"> 张量(Tensor)是一个数学概念,用于表示多维数据。它是一个可以表示多个数值的数组,可以是标量、向量、矩阵等。在深度学习和人工智能领域中,张量常用于表示神经网络的输入、输出和权重等。"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.17"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
cookbook/rag_fusion.ipynb (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "993c2768",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RAG Fusion\n",
|
||||
"\n",
|
||||
"Re-implemented from [this GitHub repo](https://github.com/Raudaschl/rag-fusion), all credit to original author\n",
|
||||
"\n",
|
||||
"> RAG-Fusion, a search methodology that aims to bridge the gap between traditional search paradigms and the multifaceted dimensions of human queries. Inspired by the capabilities of Retrieval Augmented Generation (RAG), this project goes a step further by employing multiple query generation and Reciprocal Rank Fusion to re-rank search results."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ebcc6791",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"For this example, we will use Pinecone and some fake data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "661a1c36",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pinecone\n",
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Pinecone\n",
|
||||
"\n",
|
||||
"pinecone.init(api_key=\"...\", environment=\"...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "48ef7e93",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"all_documents = {\n",
|
||||
" \"doc1\": \"Climate change and economic impact.\",\n",
|
||||
" \"doc2\": \"Public health concerns due to climate change.\",\n",
|
||||
" \"doc3\": \"Climate change: A social perspective.\",\n",
|
||||
" \"doc4\": \"Technological solutions to climate change.\",\n",
|
||||
" \"doc5\": \"Policy changes needed to combat climate change.\",\n",
|
||||
" \"doc6\": \"Climate change and its impact on biodiversity.\",\n",
|
||||
" \"doc7\": \"Climate change: The science and models.\",\n",
|
||||
" \"doc8\": \"Global warming: A subset of climate change.\",\n",
|
||||
" \"doc9\": \"How climate change affects daily weather.\",\n",
|
||||
" \"doc10\": \"The history of climate change activism.\",\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fde89f0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Pinecone.from_texts(\n",
|
||||
" list(all_documents.values()), OpenAIEmbeddings(), index_name=\"rag-fusion\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "22ddd041",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the Query Generator\n",
|
||||
"\n",
|
||||
"We will now define a chain to do the query generation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "1d547524",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 68,
|
||||
"id": "af9ab4db",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"prompt = hub.pull(\"langchain-ai/rag-fusion-query-generation\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "3628b552",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# prompt = ChatPromptTemplate.from_messages([\n",
|
||||
"# (\"system\", \"You are a helpful assistant that generates multiple search queries based on a single input query.\"),\n",
|
||||
"# (\"user\", \"Generate multiple search queries related to: {original_query}\"),\n",
|
||||
"# (\"user\", \"OUTPUT (4 queries):\")\n",
|
||||
"# ])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "8d6cbb73",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"generate_queries = (\n",
|
||||
" prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split(\"\\n\"))\n",
|
||||
")"
|
||||
]
|
||||
},
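{
"cell_type": "markdown",
"id": "2e7f6a1c",
"metadata": {},
"source": [
"To see what the query generator produces on its own, it can be invoked directly (a hypothetical spot-check; the generated queries will vary from run to run):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6d9b0e3a",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical spot-check of the query generator in isolation.\n",
"generate_queries.invoke({\"original_query\": \"impact of climate change\"})"
]
},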
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ee2824cd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the full chain\n",
|
||||
"\n",
|
||||
"We can now put it all together and define the full chain. This chain:\n",
|
||||
" \n",
|
||||
" 1. Generates a bunch of queries\n",
|
||||
" 2. Looks up each query in the retriever\n",
|
||||
" 3. Joins all the results together using reciprocal rank fusion\n",
|
||||
" \n",
|
||||
" \n",
|
||||
"Note that it does NOT do a final generation step"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "ca0bfec4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"original_query = \"impact of climate change\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 75,
|
||||
"id": "02437d65",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Pinecone.from_existing_index(\"rag-fusion\", OpenAIEmbeddings())\n",
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 76,
|
||||
"id": "46a9a0e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.load import dumps, loads\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def reciprocal_rank_fusion(results: list[list], k=60):\n",
|
||||
" fused_scores = {}\n",
|
||||
" for docs in results:\n",
|
||||
" # Assumes the docs are returned in sorted order of relevance\n",
|
||||
" for rank, doc in enumerate(docs):\n",
|
||||
" doc_str = dumps(doc)\n",
|
||||
" if doc_str not in fused_scores:\n",
|
||||
" fused_scores[doc_str] = 0\n",
|
||||
" previous_score = fused_scores[doc_str]\n",
|
||||
" fused_scores[doc_str] += 1 / (rank + k)\n",
|
||||
"\n",
|
||||
" reranked_results = [\n",
|
||||
" (loads(doc), score)\n",
|
||||
" for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)\n",
|
||||
" ]\n",
|
||||
" return reranked_results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 77,
|
||||
"id": "3f9d4502",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = generate_queries | retriever.map() | reciprocal_rank_fusion"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 78,
|
||||
"id": "d70c4fcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[(Document(page_content='Climate change and economic impact.'),\n",
|
||||
" 0.06558258417063283),\n",
|
||||
" (Document(page_content='Climate change: A social perspective.'),\n",
|
||||
" 0.06400409626216078),\n",
|
||||
" (Document(page_content='How climate change affects daily weather.'),\n",
|
||||
" 0.04787506400409626),\n",
|
||||
" (Document(page_content='Climate change and its impact on biodiversity.'),\n",
|
||||
" 0.03306010928961749),\n",
|
||||
" (Document(page_content='Public health concerns due to climate change.'),\n",
|
||||
" 0.016666666666666666),\n",
|
||||
" (Document(page_content='Technological solutions to climate change.'),\n",
|
||||
" 0.016666666666666666),\n",
|
||||
" (Document(page_content='Policy changes needed to combat climate change.'),\n",
|
||||
" 0.01639344262295082)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 78,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"original_query\": original_query})"
|
||||
]
|
||||
},
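{
"cell_type": "markdown",
"id": "8f2a6c1d",
"metadata": {},
"source": [
"The chain above stops at the fused document ranking. As a possible extension (a hedged sketch, not part of the original RAG-Fusion flow; `format_fused` and `rag_fusion_with_answer` are hypothetical names), a final generation step could be stacked on top:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a5e9d4b",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical extension: answer the original question from the fused results.\n",
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"template = \"\"\"Answer the question based only on the following documents:\n",
"\n",
"{context}\n",
"\n",
"Question: {question}\"\"\"\n",
"answer_prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"\n",
"def format_fused(fused_results):\n",
"    # fused_results is the list of (Document, score) pairs from reciprocal_rank_fusion\n",
"    return \"\\n\".join(doc.page_content for doc, _ in fused_results)\n",
"\n",
"\n",
"rag_fusion_with_answer = (\n",
"    {\n",
"        \"context\": chain | format_fused,\n",
"        \"question\": lambda x: x[\"original_query\"],\n",
"    }\n",
"    | answer_prompt\n",
"    | ChatOpenAI(temperature=0)\n",
"    | StrOutputParser()\n",
")\n",
"\n",
"# rag_fusion_with_answer.invoke({\"original_query\": original_query})"
]
},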
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7866e551",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
cookbook/retrieval_in_sql.ipynb (new file, 689 lines)
@@ -0,0 +1,689 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Incoporating semantic similarity in tabular databases\n",
|
||||
"\n",
|
||||
"In this notebook we will cover how to run semantic search over a specific table column within a single SQL query, combining tabular query with RAG.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Overall workflow\n",
|
||||
"\n",
|
||||
"1. Generating embeddings for a specific column\n",
|
||||
"2. Storing the embeddings in a new column (if column has low cardinality, it's better to use another table containing unique values and their embeddings)\n",
|
||||
"3. Querying using standard SQL queries with [PGVector](https://github.com/pgvector/pgvector) extension which allows using L2 distance (`<->`), Cosine distance (`<=>` or cosine similarity using `1 - <=>`) and Inner product (`<#>`)\n",
|
||||
"4. Running standard SQL query\n",
|
||||
"\n",
|
||||
"### Requirements\n",
|
||||
"\n",
|
||||
"We will need a PostgreSQL database with [pgvector](https://github.com/pgvector/pgvector) extension enabled. For this example, we will use a `Chinook` database using a local PostgreSQL server."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\") or getpass.getpass(\n",
|
||||
" \"OpenAI API Key:\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.sql_database import SQLDatabase\n",
|
||||
"\n",
|
||||
"CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n",
|
||||
"db = SQLDatabase.from_uri(CONNECTION_STRING)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embedding the song titles"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For this example, we will run queries based on semantic meaning of song titles. In order to do this, let's start by adding a new column in the table for storing the embeddings:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# db.run('ALTER TABLE \"Track\" ADD COLUMN \"embeddings\" vector;')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's generate the embedding for each *track title* and store it as a new column in our \"Track\" table"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings_model = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"3503"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tracks = db.run('SELECT \"Name\" FROM \"Track\"')\n",
|
||||
"song_titles = [s[0] for s in eval(tracks)]\n",
|
||||
"title_embeddings = embeddings_model.embed_documents(song_titles)\n",
|
||||
"len(title_embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's insert the embeddings in the into the new column from our table"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from tqdm import tqdm\n",
|
||||
"\n",
|
||||
"for i in tqdm(range(len(title_embeddings))):\n",
|
||||
" title = titles[i].replace(\"'\", \"''\")\n",
|
||||
" embedding = title_embeddings[i]\n",
|
||||
" sql_command = (\n",
|
||||
" f'UPDATE \"Track\" SET \"embeddings\" = ARRAY{embedding} WHERE \"Name\" ='\n",
|
||||
" + f\"'{title}'\"\n",
|
||||
" )\n",
|
||||
" db.run(sql_command)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can test the semantic search running the following query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'[(\"Tomorrow\\'s Dream\",), (\\'Remember Tomorrow\\',), (\\'Remember Tomorrow\\',), (\\'The Best Is Yet To Come\\',), (\"Thinking \\'Bout Tomorrow\",)]'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeded_title = embeddings_model.embed_query(\"hope about the future\")\n",
|
||||
"query = (\n",
|
||||
" 'SELECT \"Track\".\"Name\" FROM \"Track\" WHERE \"Track\".\"embeddings\" IS NOT NULL ORDER BY \"embeddings\" <-> '\n",
|
||||
" + f\"'{embeded_title}' LIMIT 5\"\n",
|
||||
")\n",
|
||||
"db.run(query)"
|
||||
]
|
||||
},
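{
"cell_type": "markdown",
"metadata": {},
"source": [
"The other pgvector operators listed in the workflow above work the same way. For example, a hypothetical variant of the query using cosine distance instead of L2 distance:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical variant using cosine distance (<=>) instead of L2 distance (<->).\n",
"query_cosine = (\n",
"    'SELECT \"Track\".\"Name\" FROM \"Track\" WHERE \"Track\".\"embeddings\" IS NOT NULL ORDER BY \"embeddings\" <=> '\n",
"    + f\"'{embedded_title}' LIMIT 5\"\n",
")\n",
"db.run(query_cosine)"
]
},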
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Creating the SQL Chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's start by defining useful functions to get info from database and running the query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_schema(_):\n",
|
||||
" return db.get_table_info()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def run_query(query):\n",
|
||||
" return db.run(query)"
|
||||
]
|
||||
},
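{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick (hypothetical) peek at the schema string these helpers return, which is what gets injected into the prompt below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical: print the start of the schema string passed to the prompt.\n",
"print(get_schema(None)[:300])"
]
},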
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's build the **prompt** we will use. This prompt is an extension from [text-to-postgres-sql](https://smith.langchain.com/hub/jacob/text-to-postgres-sql?organizationId=f9b614b8-5c3a-4e7c-afbc-6d7ad4fd8892) prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"template = \"\"\"You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.\n",
|
||||
"Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. You can order the results to return the most informative data in the database.\n",
|
||||
"Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (\") to denote them as delimited identifiers.\n",
|
||||
"Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.\n",
|
||||
"Pay attention to use date('now') function to get the current date, if the question involves \"today\".\n",
|
||||
"\n",
|
||||
"You can use an extra extension which allows you to run semantic similarity using <-> operator on tables containing columns named \"embeddings\".\n",
|
||||
"<-> operator can ONLY be used on embeddings columns.\n",
|
||||
"The embeddings value for a given row typically represents the semantic meaning of that row.\n",
|
||||
"The vector represents an embedding representation of the question, given below. \n",
|
||||
"Do NOT fill in the vector values directly, but rather specify a `[search_word]` placeholder, which should contain the word that would be embedded for filtering.\n",
|
||||
"For example, if the user asks for songs about 'the feeling of loneliness' the query could be:\n",
|
||||
"'SELECT \"[whatever_table_name]\".\"SongName\" FROM \"[whatever_table_name]\" ORDER BY \"embeddings\" <-> '[loneliness]' LIMIT 5'\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: <Question here>\n",
|
||||
"SQLQuery: <SQL Query to run>\n",
|
||||
"SQLResult: <Result of the SQLQuery>\n",
|
||||
"Answer: <Final answer here>\n",
|
||||
"\n",
|
||||
"Only use the following tables:\n",
|
||||
"\n",
|
||||
"{schema}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", template), (\"human\", \"{question}\")]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we can create the chain using **[LangChain Expression Language](https://python.langchain.com/docs/expression_language/)**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_uri(\n",
|
||||
" CONNECTION_STRING\n",
|
||||
") # We reconnect to db so the new columns are loaded as well.\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
|
||||
"\n",
|
||||
"sql_query_chain = (\n",
|
||||
" RunnablePassthrough.assign(schema=get_schema)\n",
|
||||
" | prompt\n",
|
||||
" | llm.bind(stop=[\"\\nSQLResult:\"])\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'SQLQuery: SELECT \"Track\".\"Name\" FROM \"Track\" JOIN \"Genre\" ON \"Track\".\"GenreId\" = \"Genre\".\"GenreId\" WHERE \"Genre\".\"Name\" = \\'Rock\\' ORDER BY \"Track\".\"embeddings\" <-> \\'[dispair]\\' LIMIT 5'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sql_query_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"Which are the 5 rock songs with titles about deep feeling of dispair?\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This chain simply generates the query. Now we will create the full chain that also handles the execution and the final result for the user:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"\n",
|
||||
"from langchain.schema.runnable import RunnableLambda\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def replace_brackets(match):\n",
|
||||
" words_inside_brackets = match.group(1).split(\", \")\n",
|
||||
" embedded_words = [\n",
|
||||
" str(embeddings_model.embed_query(word)) for word in words_inside_brackets\n",
|
||||
" ]\n",
|
||||
" return \"', '\".join(embedded_words)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_query(query):\n",
|
||||
" sql_query = re.sub(r\"\\[([\\w\\s,]+)\\]\", replace_brackets, query)\n",
|
||||
" return sql_query\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
|
||||
"{schema}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"SQL Query: {query}\n",
|
||||
"SQL Response: {response}\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", template), (\"human\", \"{question}\")]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"full_chain = (\n",
|
||||
" RunnablePassthrough.assign(query=sql_query_chain)\n",
|
||||
" | RunnablePassthrough.assign(\n",
|
||||
" schema=get_schema,\n",
|
||||
" response=RunnableLambda(lambda x: db.run(get_query(x[\"query\"]))),\n",
|
||||
" )\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
")"
|
||||
]
|
||||
},
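{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before invoking the full chain, the placeholder substitution can be spot-checked on a toy query (a hypothetical example; `sample_query` is not from the original notebook, and this call hits the embeddings API):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical spot-check: the bracketed word is replaced by its embedding vector.\n",
"sample_query = \"ORDER BY embeddings <-> '[loneliness]' LIMIT 5\"\n",
"print(get_query(sample_query)[:120], \"...\")"
]
},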
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the Chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example 1: Filtering a column based on semantic meaning"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's say we want to retrieve songs that express `deep feeling of dispair`, but filtering based on genre:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"The 5 rock songs with titles that convey a deep feeling of despair are 'Sea Of Sorrow', 'Surrender', 'Indifference', 'Hard Luck Woman', and 'Desire'.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"Which are the 5 rock songs with titles about deep feeling of dispair?\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"What is substantially different in implementing this method is that we have combined:\n",
|
||||
"- Semantic search (songs that have titles with some semantic meaning)\n",
|
||||
"- Traditional tabular querying (running JOIN statements to filter track based on genre)\n",
|
||||
"\n",
|
||||
"This is something we _could_ potentially achieve using metadata filtering, but it's more complex to do so (we would need to use a vector database containing the embeddings, and use metadata filtering based on genre).\n",
|
||||
"\n",
|
||||
"However, for other use cases metadata filtering **wouldn't be enough**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example 2: Combining filters"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"The three albums which have the most amount of songs in the top 150 saddest songs are 'International Superhits' with 5 songs, 'Ten' with 4 songs, and 'Album Of The Year' with 3 songs.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"I want to know the 3 albums which have the most amount of songs in the top 150 saddest songs\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So we have result for 3 albums with most amount of songs in top 150 saddest ones. This **wouldn't** be possible using only standard metadata filtering. Without this _hybdrid query_, we would need some postprocessing to get the result.\n",
|
||||
"\n",
|
||||
"Another similar exmaple:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"The 6 albums with the shortest titles that contain songs which are in the 20 saddest song list are 'Ten', 'Core', 'Big Ones', 'One By One', 'Black Album', and 'Miles Ahead'.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 30,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"I need the 6 albums with shortest title, as long as they contain songs which are in the 20 saddest song list.\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's see what the query looks like to double check:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WITH \"SadSongs\" AS (\n",
|
||||
" SELECT \"TrackId\" FROM \"Track\" \n",
|
||||
" ORDER BY \"embeddings\" <-> '[sad]' LIMIT 20\n",
|
||||
"),\n",
|
||||
"\"SadAlbums\" AS (\n",
|
||||
" SELECT DISTINCT \"AlbumId\" FROM \"Track\" \n",
|
||||
" WHERE \"TrackId\" IN (SELECT \"TrackId\" FROM \"SadSongs\")\n",
|
||||
")\n",
|
||||
"SELECT \"Album\".\"Title\" FROM \"Album\" \n",
|
||||
"WHERE \"AlbumId\" IN (SELECT \"AlbumId\" FROM \"SadAlbums\") \n",
|
||||
"ORDER BY \"title_len\" ASC \n",
|
||||
"LIMIT 6\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(\n",
|
||||
" sql_query_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"I need the 6 albums with shortest title, as long as they contain songs which are in the 20 saddest song list.\"\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Example 3: Combining two separate semantic searches\n",
|
||||
"\n",
|
||||
"One interesting aspect of this approach which is **substantially different from using standar RAG** is that we can even **combine** two semantic search filters:\n",
|
||||
"- _Get 5 saddest songs..._\n",
|
||||
"- _**...obtained from albums with \"lovely\" titles**_\n",
|
||||
"\n",
|
||||
"This could generalize to **any kind of combined RAG** (paragraphs discussing _X_ topic belonging from books about _Y_, replies to a tweet about _ABC_ topic that express _XYZ_ feeling)\n",
|
||||
"\n",
|
||||
"We will combine semantic search on songs and album titles, so we need to do the same for `Album` table:\n",
|
||||
"1. Generate the embeddings\n",
|
||||
"2. Add them to the table as a new column (which we need to add in the table)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# db.run('ALTER TABLE \"Album\" ADD COLUMN \"embeddings\" vector;')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 347/347 [00:01<00:00, 179.64it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"albums = db.run('SELECT \"Title\" FROM \"Album\"')\n",
|
||||
"album_titles = [title[0] for title in eval(albums)]\n",
|
||||
"album_title_embeddings = embeddings_model.embed_documents(album_titles)\n",
|
||||
"for i in tqdm(range(len(album_title_embeddings))):\n",
|
||||
" album_title = album_titles[i].replace(\"'\", \"''\")\n",
|
||||
" album_embedding = album_title_embeddings[i]\n",
|
||||
" sql_command = (\n",
|
||||
" f'UPDATE \"Album\" SET \"embeddings\" = ARRAY{album_embedding} WHERE \"Title\" ='\n",
|
||||
" + f\"'{album_title}'\"\n",
|
||||
" )\n",
|
||||
" db.run(sql_command)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"[('Realize',), ('Morning Dance',), ('Into The Light',), ('New Adventures In Hi-Fi',), ('Miles Ahead',)]\""
|
||||
]
|
||||
},
|
||||
"execution_count": 45,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeded_title = embeddings_model.embed_query(\"hope about the future\")\n",
|
||||
"query = (\n",
|
||||
" 'SELECT \"Album\".\"Title\" FROM \"Album\" WHERE \"Album\".\"embeddings\" IS NOT NULL ORDER BY \"embeddings\" <-> '\n",
|
||||
" + f\"'{embeded_title}' LIMIT 5\"\n",
|
||||
")\n",
|
||||
"db.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can combine both filters:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 54,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = SQLDatabase.from_uri(\n",
|
||||
" CONNECTION_STRING\n",
|
||||
") # We reconnect to dbso the new columns are loaded as well."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='The songs about breakouts obtained from the top 5 albums about love are \\'Royal Orleans\\', \"Nobody\\'s Fault But Mine\", \\'Achilles Last Stand\\', \\'For Your Life\\', and \\'Hots On For Nowhere\\'.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 49,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"I want to know songs about breakouts obtained from top 5 albums about love\"\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is something **different** that **couldn't be achieved** using standard metadata filtering over a vectordb."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
cookbook/rewrite.ipynb (new file, 351 lines)
@@ -0,0 +1,351 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "260629f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Rewrite-Retrieve-Read\n",
|
||||
"\n",
|
||||
"**Rewrite-Retrieve-Read** is a method proposed in the paper [Query Rewriting for Retrieval-Augmented Large Language Models](https://arxiv.org/pdf/2305.14283.pdf)\n",
|
||||
"\n",
|
||||
"> Because the original query can not be always optimal to retrieve for the LLM, especially in the real world... we first prompt an LLM to rewrite the queries, then conduct retrieval-augmented reading\n",
|
||||
"\n",
|
||||
"We show how you can easily do that with LangChain Expression Language"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eda93712",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Baseline\n",
|
||||
"\n",
|
||||
"Baseline RAG (**Retrieve-and-read**) can be done like the following:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1d2edbd2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||
"from langchain.utilities import DuckDuckGoSearchAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "86a46aa9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Answer the users question based only on the following context:\n",
|
||||
"\n",
|
||||
"<context>\n",
|
||||
"{context}\n",
|
||||
"</context>\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"search = DuckDuckGoSearchAPIWrapper()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def retriever(query):\n",
|
||||
" return search.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8566d48e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = (\n",
|
||||
" {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "5c57f9ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"simple_query = \"what is langchain?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "37c5f962",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"LangChain is a powerful and versatile Python library that enables developers and researchers to create, experiment with, and analyze language models and agents. It simplifies the development of language-based applications by providing a suite of features for artificial general intelligence. It can be used to build chatbots, perform document analysis and summarization, and streamline interaction with various large language model providers. LangChain's unique proposition is its ability to create logical links between one or more language models, known as Chains. It is an open-source library that offers a generic interface to foundation models and allows prompt management and integration with other components and tools.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(simple_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23bdb9bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"While this is fine for well formatted queries, it can break down for more complicated queries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "8df6a814",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"distracted_query = \"man that sam bankman fried trial was crazy! what is langchain?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "16d7db64",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Based on the given context, there is no information provided about \"langchain.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(distracted_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0b4f8b93",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is because the retriever does a bad job with these \"distracted\" queries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "3439d8dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Business She\\'s the star witness against Sam Bankman-Fried. Her testimony was explosive Gary Wang, who co-founded both FTX and Alameda Research, said Bankman-Fried directed him to change a... The Verge, following the trial\\'s Oct. 4 kickoff: \"Is Sam Bankman-Fried\\'s Defense Even Trying to Win?\". CBS Moneywatch, from Thursday: \"Sam Bankman-Fried\\'s Lawyer Struggles to Poke ... Sam Bankman-Fried, FTX\\'s founder, responded with a single word: \"Oof.\". Less than a year later, Mr. Bankman-Fried, 31, is on trial in federal court in Manhattan, fighting criminal charges ... July 19, 2023. A U.S. judge on Wednesday overruled objections by Sam Bankman-Fried\\'s lawyers and allowed jurors in the FTX founder\\'s fraud trial to see a profane message he sent to a reporter days ... Sam Bankman-Fried, who was once hailed as a virtuoso in cryptocurrency trading, is on trial over the collapse of FTX, the financial exchange he founded. Bankman-Fried is accused of...'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever(distracted_query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7eb748ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Rewrite-Retrieve-Read Implementation\n",
|
||||
"\n",
|
||||
"The main part is a rewriter to rewrite the search query"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "88ae702e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Provide a better search query for \\\n",
|
||||
"web search engine to answer the given question, end \\\n",
|
||||
"the queries with ’**’. Question: \\\n",
|
||||
"{x} Answer:\"\"\"\n",
|
||||
"rewrite_prompt = ChatPromptTemplate.from_template(template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "184e1bcb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"rewrite_prompt = hub.pull(\"langchain-ai/rewrite\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "a4c23d40",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Provide a better search query for web search engine to answer the given question, end the queries with ’**’. Question {x} Answer:\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(rewrite_prompt.template)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "f55cd010",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Parser to remove the `**`\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _parse(text):\n",
|
||||
" return text.strip(\"**\")"
|
||||
]
|
||||
},
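{
"cell_type": "markdown",
"id": "3c6e8b2a",
"metadata": {},
"source": [
"A quick hypothetical check of the parser on a string with the trailing marker:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7d1f4e9c",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical check: the trailing '**' marker is stripped.\n",
"_parse(\"What is LangChain?**\")  # -> 'What is LangChain?'"
]
},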
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "c9c34bef",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "fb17fb3d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'What is the definition and purpose of Langchain?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rewriter.invoke({\"x\": distracted_query})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "f83edb09",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"rewrite_retrieve_read_chain = (\n",
|
||||
" {\n",
|
||||
" \"context\": {\"x\": RunnablePassthrough()} | rewriter | retriever,\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | model\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": 16,
"id": "43096322",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Based on the given context, LangChain is an open-source framework designed to simplify the creation of applications using large language models (LLMs). It enables LLM models to generate responses based on up-to-date online information and simplifies the organization of large volumes of data for easy access by LLMs. LangChain offers a standard interface for chains, integrations with other tools, and end-to-end chains for common applications. It is a robust library that streamlines interaction with various LLM providers. LangChain\\'s unique proposition is its ability to create logical links between one or more LLMs, known as Chains. It is an AI framework with features that simplify the development of language-based applications and offers a suite of features for artificial general intelligence. However, the context does not provide any information about the \"sam bankman fried trial\" mentioned in the question.'"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rewrite_retrieve_read_chain.invoke(distracted_query)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59874b4f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
1162
cookbook/sales_agent_with_context.ipynb
Normal file
File diff suppressed because it is too large
175
cookbook/selecting_llms_based_on_context_length.ipynb
Normal file
@@ -0,0 +1,175 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e93283d1",
"metadata": {},
"source": [
"# Selecting LLMs based on Context Length\n",
"\n",
"Different LLMs have different context lengths. As an immediate and practical example, OpenAI has two versions of GPT-3.5-Turbo: one with a 4k context window, the other with 16k. This notebook shows how to route between them based on the size of the input."
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "cc453450",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.prompt import PromptValue"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "1cec6a10",
"metadata": {},
"outputs": [],
"source": [
"short_context_model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"long_context_model = ChatOpenAI(model=\"gpt-3.5-turbo-16k\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "772da153",
"metadata": {},
"outputs": [],
"source": [
"def get_context_length(prompt: PromptValue):\n",
"    messages = prompt.to_messages()\n",
"    tokens = short_context_model.get_num_tokens_from_messages(messages)\n",
"    return tokens"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "db771e20",
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate.from_template(\"Summarize this passage: {context}\")"
]
},
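{
"cell_type": "markdown",
"id": "8f3a6b2e",
"metadata": {},
"source": [
"As a sanity check, `PromptTemplate.format_prompt` returns the `PromptValue` that `get_context_length` expects, so the token counting can be inspected directly (illustrative call; output not recorded):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1d7c9a40",
"metadata": {},
"outputs": [],
"source": [
"# Count the tokens for a short example input.\n",
"get_context_length(prompt.format_prompt(context=\"a frog went to a pond\"))"
]
},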
{
"cell_type": "code",
"execution_count": 20,
"id": "af057e2f",
"metadata": {},
"outputs": [],
"source": [
"def choose_model(prompt: PromptValue):\n",
"    context_len = get_context_length(prompt)\n",
"    if context_len < 30:\n",
"        print(\"short model\")\n",
"        return short_context_model\n",
"    else:\n",
"        print(\"long model\")\n",
"        return long_context_model"
]
},
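{
"cell_type": "markdown",
"id": "c4e2f7a9",
"metadata": {},
"source": [
"The 30-token cutoff above is a toy threshold chosen so this demo exercises both branches. A production router would budget against the short model's actual context window; a minimal sketch follows, with the window size and completion head-room as assumptions:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e5a1b8d2",
"metadata": {},
"outputs": [],
"source": [
"# Window-based cutoff sketch. Both numbers are assumptions:\n",
"# 4096 is gpt-3.5-turbo's context size; 512 reserves room for the reply.\n",
"SHORT_MODEL_WINDOW = 4096\n",
"COMPLETION_HEADROOM = 512\n",
"\n",
"\n",
"def choose_model_by_window(prompt: PromptValue):\n",
"    if get_context_length(prompt) <= SHORT_MODEL_WINDOW - COMPLETION_HEADROOM:\n",
"        return short_context_model\n",
"    return long_context_model"
]
},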
{
"cell_type": "code",
"execution_count": 25,
"id": "84f3e07d",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | choose_model | StrOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "d8b14f8f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"short model\n"
]
},
{
"data": {
"text/plain": [
"'The passage mentions that a frog visited a pond.'"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"context\": \"a frog went to a pond\"})"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "70ebd3dd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"long model\n"
]
},
{
"data": {
"text/plain": [
"'The passage describes a frog that moved from one pond to another and perched on a log.'"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\n",
"    {\"context\": \"a frog went to a pond and sat on a log and went to a different pond\"}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7e29fef",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Some files were not shown because too many files have changed in this diff