Mirror of https://github.com/hwchase17/langchain.git, synced 2026-01-21 21:56:38 +00:00

Compare commits: langchain-...cc/token_c (594 commits)
594 commits (SHA1):

c391fb1f31
4804bd6ec2
10087ac024
f752c1a07f
7902fa3238
4be9407b09
9225bff326
d4cb740e0c
e5c9912a89
8bca31f8c4
c5baa3ac27
795e746ca7
6519a5675b
e9f7cd3e0e
5c94e47d14
e0950f29b7
71778cb721
37d8666276
c286c06f16
b83e9b1056
c1f66611fc
f93bc48915
516d74b6df
c85f7b6061
f167c35243
b8a76cb6e9
dbcdf0b702
beb2ee6edf
9f61ed8b81
6cff82d02e
0cd72b50fb
1a3cd46d88
470160cf81
20b8342fdf
2f8af61218
81758e22f3
54241f4d06
7c9223d2b2
3342e4d62d
5842110dbc
62db04c43a
fb892ee50a
8ad0e9f267
d0b13e926d
6fa4a45311
97dd5f2cb8
2a82fbc0ff
0e5e33ba03
fc35544e0d
15cc090e52
0f940d74b2
7829b722b1
914730cf8d
c3738ea376
cd124a0949
57ff48e62e
bc232e6d03
be32382d92
16c984ef0a
13dd115d1d
75d365418b
2cff369cdc
f5b6eecf72
a528ea1796
bf6a5eb122
5720dea41b
087107557f
05ba853548
3fb90666be
6a2a149f89
bbc1d46efe
d6b5f05f33
10377a7373
373ad8ac2c
5eec11e2db
badc0cf1b6
3b7abdff96
4aebfbad59
ae1f03fbe0
46dbb3967e
dd0b990ba5
5aa46501cf
92df109dd5
d27fb0c432
69dd39c461
41cebfe4fb
5350967ddc
7542278997
ff6e3558d7
585e12e53b
73ba156a7d
395c8d0bd4
34d31b8394
2aa0555941
dff229d018
b009ca4d23
0254c12cb0
2faed37ff1
d886dcfba5
91d5ca275d
dcb670f395
85012ae601
aa0f4fb927
d18cdc6f32
8a5f46322b
a0e86b18bf
6affec92ce
a64aee310c
ba6c2590ae
bb71f53585
9875ffbabc
b5efafe80c
ff3353f02f
3ace4e3680
80c397019f
4a42158e6c
7ba3e80057
50e27a447b
78c10f8790
ccfc9f795a
b21926fe6c
f1ad0da8f5
f67af34ea0
3030ffc248
1ad9de4b45
b95cb770e8
1867521d1a
8e3ca21bd3
e92c817518
28727618b3
3108b14164
1922adc092
4a242a8a4f
064b37f90e
062678fa18
5d3e3d3f31
5a7cf87626
c63f23d233
b7091d391d
7a2952210e
7549845d82
878f033ed7
4065106c2e
12df938ace
65ee43cc10
fe7c000fc1
dad50e5624
0a6d01e61d
c6f8b0875a
4c3800d743
7fe1c4b78f
c375732396
9c21f83e82
880652b713
4ab94579ad
eb0545a173
a2e389de9f
01573c1375
2ba3ce81a6
4e4e5d7337
2a863727f9
30e2260e26
cbaea351b2
f070217c3b
0915682c12
68ab9a1e56
47b79c30c0
5899f980aa
b0bf4afe81
33e5d01f7c
ee3373afc2
b296f103a9
525d5c0169
c4b6ba254e
b7d1831f9d
328ba36601
6f677ef5c1
d47d41cbd3
32bbe99efc
990e346c46
9b7792631d
558a8fe25b
52b1516d44
8a3bb73c05
099c042395
2d4f00a451
9bd401a6d4
6aa3794b74
189dcf7295
1bc88028e6
d2942351ce
83c078f363
26d39ffc4a
421e2ceeee
275dcbf69f
9f87b27a5b
b2e1196e29
2dc1396380
77941ab3ce
ee19a30dde
5d799b3174
8f33a985a2
78eeccef0e
3d415441e8
74385e0ebd
2bfbc29ccc
ef79c26f18
fbe32c8e89
2511c28f92
637bb1cbbc
3dfea96ec1
68643153e5
462762f75b
4f3729c004
ba428cdf54
69c7d1b01b
733299ec13
e1adf781c6
31b5e4810c
c6801fe159
1b563067f8
1996d81d72
ab0677c6f1
bdb53c93cc
94d5271cb5
e499db4266
cc3af82b47
9383b78be1
3c492571ab
f2410f7ea7
91560b6a7a
b1dd448233
904daf6f40
8e31a5d7bd
ee630b4539
46971447df
d8b94007c1
cf595dcc38
d27211cfa7
ca1a3fbe88
c955b53aed
2a626d9608
0861cba04b
88246f45b3
1d04514354
c2324b8f3e
957ea65d12
00fa38a295
9d98c1b669
00cc9d421f
65716cf590
1b77a191f4
ebfde9173c
2fe0369049
e023201d42
d40e340479
9a09ed0659
5f27b546dd
022fdd52c3
7946a8f64e
7af79039fc
1755750ca1
ddb53672e2
eeae34972f
47d89b1e47
ee0bdaeb79
915c446c48
d1e2099408
6ea15b9efa
69f33aaff5
3f66f102d2
c6547f58b7
dfb05a7fa0
2f67f9ddcb
0e36185933
6617865440
6dba4912be
7a3827471b
f006bc4c7e
0a442644e3
4960663546
1381137c37
b4a042dfc4
81c4f21b52
f2dab562a8
61196a8280
7a97c31ac0
424214041e
b06bd6a913
1c762187e8
90aefc607f
2ca73c479b
17c7c273b8
493be259c3
106c6ac273
7aaaa371e7
468dad1780
32d294b89a
dc5b7dace8
e00b7233cf
91f7e73c27
75fff151e8
d05a0cb80d
d24aa69ceb
fabcacc3e5
ac58d75113
28564ef94e
b62a9b57f3
76dd656f2a
d218936763
123e29dc26
6a1dca113e
8aea6dd23a
78a2f86f70
b5e23e5823
7872643910
f15391f4fc
ca9b81cc2e
a2a9a02ecb
e5e1d6c705
6ee19473ba
a59551f3b4
3286a98b27
62769a0dac
f94108b4bc
60a0ff8217
b3dffc70e2
86ac39e11f
6e036d38b2
2d30ebb53b
b3934b9580
09102a634a
95ff5901a1
f3d7152074
dff37f6048
832036ef0f
f1742954ab
6ab0476676
d36413c821
99097f799c
0666571519
ef85161525
079eb808f8
39fb2d1a3b
db7f2db1ae
df46c82ae2
f8adbbc461
17f0716d6c
5acd34ae92
84dbebac4f
eddfcd2c88
9f470d297f
2222470f69
78175fcb96
d9e659ca4f
e731ba1e47
557fc9a817
965dac74e5
7d7a50d4cc
9319eecaba
a47386f6dc
aaf88c157f
3dcf4ae1e9
3391168777
28728dca9f
1ae7fb7694
7aef3388d9
1d056487c7
64e6798a39
4a65e827f7
35b89b8b10
8efa75d04c
8fd54f13b5
952fa8aa99
3948273350
a16307fe84
af6f2cf366
6997867f0e
de791bc3ef
69c6e7de59
10cee59f2e
58f521ea4f
a194ae6959
4d623133a5
8fbf192c2a
241a382fba
c194ee2046
85567f1dc3
6f4978041e
f1fca4f46f
2b899fe961
3152d25811
3b8cb3d4b6
15047ae28a
888fa3a2fb
90346b8a35
2d5efd7b29
1d2273597a
9dd494ddcd
2fa07b19f6
a022e3c14d
e0e11423d9
34de8ec1f3
3d288fd610
055cccde28
361514d11d
90b68059f5
87ad5276e4
5489df75d7
c6b3f5b888
15db024811
6d73003b17
13259a109a
aa78be574a
d0dd1b30d1
0338a15192
e10d99b728
c9018f81ec
31718492c7
2209878f48
dd77dbe3ab
eb19e12527
551e86a517
8734c05f64
0c8cbfb7de
89c3428d85
707e96c541
26e0a00c4c
d0f8f00e7e
a39132787c
296994ebf0
b5b31eec88
8f6851c349
0788461abd
3bfd1f6d8a
d83c3a12bf
79200cf3c2
bcb6789888
89b7933ef1
4da5a8081f
53e9f00804
6e25e185f6
68ceeb64f6
edae976b81
9f4366bc9d
99e0a60aab
d38729fbac
ff0d21cfd5
9140a7cb86
41fe18bc80
9105573cb3
fff87e95d1
9beb29a34c
ca00f5aed9
637777b8e7
1cf851e054
961f965f0c
760fc3bc12
e3fc7d8aa6
2b3b209e40
78903ac285
f361acc11c
ed185c0026
6dc34beb71
c2205f88e6
abdbe185c5
c1b816cb7e
0559558715
75965474fc
5dc014fdf4
291a9fcea1
dd994b9d7f
83901b30e3
bcfa21a6e7
af1da28459
ed2ee4e8cc
f293c8ffd6
714c370191
a29d4e9c3a
74983f8a96
11c5b86981
383f4c0ee9
045e7ad4a1
0e80291804
c99773b652
5f9e3e33cd
6fc21afbc9
50445d4a27
11a2efe49b
d8a680ee57
f405a2c57d
3576e690fa
057ac361ef
d9675a4a20
c27271f3ae
a3e4f4c2e3
b5030badbe
b6132fc23e
f33b1b3d77
c382788342
e193a1f273
eb70672f4a
87df179ca9
982a950ccf
c2435eeca5
68c56440cf
31eeb50ce0
0039b3b046
ffb1a08871
d13823043d
b665b81a0e
6b9b177b89
b1acf8d931
97f731da7e
1bf29da0d6
2c3fec014f
4c38157ee0
b5f8e87e2f
6a2efd060e
cda336295f
02f4256cb6
492ba3d127
cbf8d46d3e
58598f01b0
89fe7e1ac1
a24712f7f7
8446fef00d
8bcdfbb24e
b8ebc14a23
aa442bc52f
2e024b7ede
c8205ff511
ea0a25d7fe
29b5df3881
690b620b7f
c55c9785be
20e04fc3dd
078137f0ba
d0f5a1cc96
e8e41bd7a6
7326966566
6eb1c34ba1
d390d2f28f
2fa9741f99
ba35387c9e
de48e102c4
08bf8f3dc9
00f4db54c4
62ccf7e8a4
0ff2bc890b
426b8e2e6a
bfed5f67a8
a4c8baebc5
a869f84c62
0ccc0cbdae
7404338786
f308139283
95a451ef2c
c8636a626a
4e50ec4b98
90e4d944ac
a16342b2bb
8e7cd85431
66889e2804
6ea03ab46c
99d8504731
a9aa3f232d
20514f5d44
df2ecd9448
bdb7dbbf16
30f7c87b6f
fdf8181f58
8a95eb1ef7
4d1cfa494a
2286d0d993
46b87e435c
905c6d7bad
acd1aa813c
2671fee2c6
010ed5d096
7f5be6b65c
1074ce5fe5
3d2f13a2f1
99361e623a
5a016de53f
b541a56c66
4a6890a4e5
e2e0327c90
bba37bd6be
b051ff4a84
13812f0df8
420dcf5c4a
9f75e20d4f
743e9b2ad1
.github/CODE_OF_CONDUCT.md (vendored): 132 lines

@@ -1,132 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
  community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of
  any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
  without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
conduct@langchain.dev.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
.github/CONTRIBUTING.md (vendored): 6 lines

@@ -1,6 +0,0 @@
# Contributing to LangChain

Hi there! Thank you for even being interested in contributing to LangChain.
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether they involve new features, improved infrastructure, better documentation, or bug fixes.

To learn how to contribute to LangChain, please follow the [contribution guide here](https://docs.langchain.com/oss/python/contributing).
.github/ISSUE_TEMPLATE/bug-report.yml (vendored): 77 changed lines

@@ -8,16 +8,15 @@ body:
value: |
Thank you for taking the time to file a bug report.

Use this to report BUGS in LangChain. For usage questions, feature requests and general design questions, please use the [LangChain Forum](https://forum.langchain.com/).
For usage questions, feature requests and general design questions, please use the [LangChain Forum](https://forum.langchain.com/).

Relevant links to check before filing a bug report to see if your issue has already been reported, fixed or
if there's another way to solve your problem:
Check these before submitting to see if your issue has already been reported, fixed or if there's another way to solve your problem:

* [LangChain Forum](https://forum.langchain.com/),
* [LangChain documentation with the integrated search](https://docs.langchain.com/oss/python/langchain/overview),
* [API Reference](https://reference.langchain.com/python/),
* [Documentation](https://docs.langchain.com/oss/python/langchain/overview),
* [API Reference Documentation](https://reference.langchain.com/python/),
* [LangChain ChatBot](https://chat.langchain.com/)
* [GitHub search](https://github.com/langchain-ai/langchain),
* [LangChain Forum](https://forum.langchain.com/),
- type: checkboxes
id: checks
attributes:
@@ -36,16 +35,48 @@ body:
required: true
- label: This is not related to the langchain-community package.
required: true
- label: I read what a minimal reproducible example is (https://stackoverflow.com/help/minimal-reproducible-example).
required: true
- label: I posted a self-contained, minimal, reproducible example. A maintainer can copy it and run it AS IS.
required: true
- type: checkboxes
id: package
attributes:
label: Package (Required)
description: |
Which `langchain` package(s) is this bug related to? Select at least one.

Note that if the package you are reporting for is not listed here, it is not in this repository (e.g. `langchain-google-genai` is in [`langchain-ai/langchain-google`](https://github.com/langchain-ai/langchain-google/)).

Please report issues for other packages to their respective repositories.
options:
- label: langchain
- label: langchain-openai
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters
- label: langchain-chroma
- label: langchain-deepseek
- label: langchain-exa
- label: langchain-fireworks
- label: langchain-groq
- label: langchain-huggingface
- label: langchain-mistralai
- label: langchain-nomic
- label: langchain-ollama
- label: langchain-perplexity
- label: langchain-prompty
- label: langchain-qdrant
- label: langchain-xai
- label: Other / not sure / general
- type: textarea
id: reproduction
validations:
required: true
attributes:
label: Example Code
label: Example Code (Python)
description: |
Please add a self-contained, [minimal, reproducible, example](https://stackoverflow.com/help/minimal-reproducible-example) with your use case.

@@ -53,15 +84,12 @@ body:

**Important!**

* Avoid screenshots when possible, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.
* Reduce your code to the minimum required to reproduce the issue if possible. This makes it much easier for others to help you.
* Use code tags (e.g., ```python ... ```) to correctly [format your code](https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting).
* INCLUDE the language label (e.g. `python`) after the first three backticks to enable syntax highlighting. (e.g., ```python rather than ```).
* Avoid screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.
* Reduce your code to the minimum required to reproduce the issue if possible.

(This will be automatically formatted into code, so no need for backticks.)
render: python
placeholder: |
The following code:

```python
from langchain_core.runnables import RunnableLambda

def bad_code(inputs) -> int:
@@ -69,17 +97,14 @@ body:

chain = RunnableLambda(bad_code)
chain.invoke('Hello!')
```
- type: textarea
id: error
validations:
required: false
attributes:
label: Error Message and Stack Trace (if applicable)
description: |
If you are reporting an error, please include the full error message and stack trace.
placeholder: |
Exception + full stack trace
If you are reporting an error, please copy and paste the full error message and
stack trace.
(This will be automatically formatted into code, so no need for backticks.)
render: shell
- type: textarea
id: description
attributes:
@@ -99,9 +124,7 @@ body:
attributes:
label: System Info
description: |
Please share your system info with us. Do NOT skip this step and please don't trim
the output. Most users don't include enough information here and it makes it harder
for us to help you.
Please share your system info with us.

Run the following command in your terminal and paste the output here:

@@ -113,8 +136,6 @@ body:
from langchain_core import sys_info
sys_info.print_sys_info()
```

alternatively, put the entire output of `pip freeze` here.
placeholder: |
python -m langchain_core.sys_info
validations:
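The template's placeholder snippet above is cut off at the hunk boundary. A minimal runnable version of that kind of reproducible example, assuming an illustrative `bad_code` body (the template elides the real one; `RunnableLambda` and `.invoke` are real langchain-core APIs):

```python
from langchain_core.runnables import RunnableLambda


def bad_code(inputs) -> int:
    # Illustrative failing body, standing in for the template's elided one.
    raise NotImplementedError("example failure for the bug report")


chain = RunnableLambda(bad_code)
chain.invoke("Hello!")  # raises NotImplementedError, as a minimal repro would
```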
.github/ISSUE_TEMPLATE/config.yml (vendored): 10 changed lines

@@ -1,9 +1,15 @@
blank_issues_enabled: false
version: 2.1
contact_links:
- name: 📚 Documentation
url: https://github.com/langchain-ai/docs/issues/new?template=langchain.yml
- name: 📚 Documentation issue
url: https://github.com/langchain-ai/docs/issues/new?template=01-langchain.yml
about: Report an issue related to the LangChain documentation
- name: 💬 LangChain Forum
url: https://forum.langchain.com/
about: General community discussions and support
- name: 📚 LangChain Documentation
url: https://docs.langchain.com/oss/python/langchain/overview
about: View the official LangChain documentation
- name: 📚 API Reference Documentation
url: https://reference.langchain.com/python/
about: View the official LangChain API reference documentation
.github/ISSUE_TEMPLATE/feature-request.yml (vendored): 40 changed lines

@@ -13,11 +13,11 @@ body:
Relevant links to check before filing a feature request to see if your request has already been made or
if there's another way to achieve what you want:

* [LangChain Forum](https://forum.langchain.com/),
* [LangChain documentation with the integrated search](https://docs.langchain.com/oss/python/langchain/overview),
* [API Reference](https://reference.langchain.com/python/),
* [Documentation](https://docs.langchain.com/oss/python/langchain/overview),
* [API Reference Documentation](https://reference.langchain.com/python/),
* [LangChain ChatBot](https://chat.langchain.com/)
* [GitHub search](https://github.com/langchain-ai/langchain),
* [LangChain Forum](https://forum.langchain.com/),
- type: checkboxes
id: checks
attributes:
@@ -34,6 +34,40 @@ body:
required: true
- label: This is not related to the langchain-community package.
required: true
- type: checkboxes
id: package
attributes:
label: Package (Required)
description: |
Which `langchain` package(s) is this request related to? Select at least one.

Note that if the package you are requesting for is not listed here, it is not in this repository (e.g. `langchain-google-genai` is in `langchain-ai/langchain-google`).

Please submit feature requests for other packages to their respective repositories.
options:
- label: langchain
- label: langchain-openai
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters
- label: langchain-chroma
- label: langchain-deepseek
- label: langchain-exa
- label: langchain-fireworks
- label: langchain-groq
- label: langchain-huggingface
- label: langchain-mistralai
- label: langchain-nomic
- label: langchain-ollama
- label: langchain-perplexity
- label: langchain-prompty
- label: langchain-qdrant
- label: langchain-xai
- label: Other / not sure / general
- type: textarea
id: feature-description
validations:
.github/ISSUE_TEMPLATE/privileged.yml (vendored): 30 changed lines

@@ -18,3 +18,33 @@ body:
attributes:
label: Issue Content
description: Add the content of the issue here.
- type: checkboxes
id: package
attributes:
label: Package (Required)
description: |
Please select package(s) that this issue is related to.
options:
- label: langchain
- label: langchain-openai
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters
- label: langchain-chroma
- label: langchain-deepseek
- label: langchain-exa
- label: langchain-fireworks
- label: langchain-groq
- label: langchain-huggingface
- label: langchain-mistralai
- label: langchain-nomic
- label: langchain-ollama
- label: langchain-perplexity
- label: langchain-prompty
- label: langchain-qdrant
- label: langchain-xai
- label: Other / not sure / general
.github/ISSUE_TEMPLATE/task.yml (vendored): 48 changed lines

@@ -25,13 +25,13 @@ body:
label: Task Description
description: |
Provide a clear and detailed description of the task.

What needs to be done? Be specific about the scope and requirements.
placeholder: |
This task involves...

The goal is to...

Specific requirements:
- ...
- ...
@@ -43,7 +43,7 @@ body:
label: Acceptance Criteria
description: |
Define the criteria that must be met for this task to be considered complete.

What are the specific deliverables or outcomes expected?
placeholder: |
This task will be complete when:
@@ -58,15 +58,15 @@ body:
label: Context and Background
description: |
Provide any relevant context, background information, or links to related issues/PRs.

Why is this task needed? What problem does it solve?
placeholder: |
Background:
- ...

Related issues/PRs:
- #...

Additional context:
- ...
validations:
@@ -77,15 +77,45 @@ body:
label: Dependencies
description: |
List any dependencies or blockers for this task.

Are there other tasks, issues, or external factors that need to be completed first?
placeholder: |
This task depends on:
- [ ] Issue #...
- [ ] PR #...
- [ ] External dependency: ...

Blocked by:
- ...
validations:
required: false
- type: checkboxes
id: package
attributes:
label: Package (Required)
description: |
Please select package(s) that this task is related to.
options:
- label: langchain
- label: langchain-openai
- label: langchain-anthropic
- label: langchain-classic
- label: langchain-core
- label: langchain-cli
- label: langchain-model-profiles
- label: langchain-tests
- label: langchain-text-splitters
- label: langchain-chroma
- label: langchain-deepseek
- label: langchain-exa
- label: langchain-fireworks
- label: langchain-groq
- label: langchain-huggingface
- label: langchain-mistralai
- label: langchain-nomic
- label: langchain-ollama
- label: langchain-perplexity
- label: langchain-prompty
- label: langchain-qdrant
- label: langchain-xai
- label: Other / not sure / general
.github/PULL_REQUEST_TEMPLATE.md (vendored): 38 changed lines

@@ -1,28 +1,30 @@
(Replace this entire block of text)

Thank you for contributing to LangChain! Follow these steps to mark your pull request as ready for review. **If any of these steps are not completed, your PR will not be considered for review.**
Read the full contributing guidelines: https://docs.langchain.com/oss/python/contributing/overview

Thank you for contributing to LangChain! Follow these steps to have your pull request considered as ready for review.

1. PR title: Should follow the format: TYPE(SCOPE): DESCRIPTION

- [ ] **PR title**: Follows the format: {TYPE}({SCOPE}): {DESCRIPTION}
- Examples:
- fix(anthropic): resolve flag parsing error
- feat(core): add multi-tenant support
- fix(cli): resolve flag parsing error
- docs(openai): update API usage examples
- Allowed `{TYPE}` values:
- feat, fix, docs, style, refactor, perf, test, build, ci, chore, revert, release
- Allowed `{SCOPE}` values (optional):
- core, cli, langchain, standard-tests, text-splitters, docs, anthropic, chroma, deepseek, exa, fireworks, groq, huggingface, mistralai, nomic, ollama, openai, perplexity, prompty, qdrant, xai, infra
- Once you've written the title, please delete this checklist item; do not include it in the PR.
- test(openai): update API usage tests
- Allowed TYPE and SCOPE values: https://github.com/langchain-ai/langchain/blob/master/.github/workflows/pr_lint.yml#L15-L33

- [ ] **PR message**: ***Delete this entire checklist*** and replace with
- **Description:** a description of the change. Include a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/using-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) if applicable to a relevant issue.
- **Issue:** the issue # it fixes, if applicable (e.g. Fixes #123)
- **Dependencies:** any dependencies required for this change
2. PR description:

- [ ] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. **We will not consider a PR unless these three are passing in CI.** See [contribution guidelines](https://docs.langchain.com/oss/python/contributing) for more.
- Write 1-2 sentences summarizing the change.
- If this PR addresses a specific issue, please include "Fixes #ISSUE_NUMBER" in the description to automatically close the issue when the PR is merged.
- If there are any breaking changes, please clearly describe them.
- If this PR depends on another PR being merged first, please include "Depends on #PR_NUMBER" in the description.

3. Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified.

- We will not consider a PR unless these three are passing in CI.

Additional guidelines:

- Most PRs should not touch more than one package.
- Please do not add dependencies to `pyproject.toml` files (even optional ones) unless they are **required** for unit tests. Likewise, please do not update the `uv.lock` files unless you are adding a required dependency.
- Changes should be backwards compatible.
- Make sure optional dependencies are imported within a function (see the sketch after this section).
- We ask that if you use generative AI for your contribution, you include a disclaimer.
- PRs should not touch more than one package unless absolutely necessary.
- Do not update the `uv.lock` files or add dependencies to `pyproject.toml` files (even optional ones) unless you have explicit permission to do so by a maintainer.
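The template's rule that optional dependencies be imported within a function can be illustrated with a minimal sketch; `fancy_tokenizer` is a hypothetical optional package, not a real LangChain dependency:

```python
def count_tokens(text: str) -> int:
    """Count tokens using an optional dependency, imported lazily.

    Importing inside the function keeps the package importable even when
    the optional dependency is not installed.
    """
    try:
        import fancy_tokenizer  # hypothetical optional dependency
    except ImportError as e:
        msg = "Please install `fancy_tokenizer` to use count_tokens."
        raise ImportError(msg) from e
    return len(fancy_tokenizer.tokenize(text))
```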
.github/actions/poetry_setup/action.yml (vendored): 93 lines

@@ -1,93 +0,0 @@
# An action for setting up poetry install with caching.
# Using a custom action since the default action does not
# take poetry install groups into account.
# Action code from:
# https://github.com/actions/setup-python/issues/505#issuecomment-1273013236
name: poetry-install-with-caching
description: Poetry install with support for caching of dependency groups.

inputs:
python-version:
description: Python version, supporting MAJOR.MINOR only
required: true

poetry-version:
description: Poetry version
required: true

cache-key:
description: Cache key to use for manual handling of caching
required: true

working-directory:
description: Directory whose poetry.lock file should be cached
required: true

runs:
using: composite
steps:
- uses: actions/setup-python@v5
name: Setup python ${{ inputs.python-version }}
id: setup-python
with:
python-version: ${{ inputs.python-version }}

- uses: actions/cache@v4
id: cache-bin-poetry
name: Cache Poetry binary - Python ${{ inputs.python-version }}
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
with:
path: |
/opt/pipx/venvs/poetry
# This step caches the poetry installation, so make sure it's keyed on the poetry version as well.
key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }}

- name: Refresh shell hashtable and fixup softlinks
if: steps.cache-bin-poetry.outputs.cache-hit == 'true'
shell: bash
env:
POETRY_VERSION: ${{ inputs.poetry-version }}
PYTHON_VERSION: ${{ inputs.python-version }}
run: |
set -eux

# Refresh the shell hashtable, to ensure correct `which` output.
hash -r

# `actions/cache@v3` doesn't always seem able to correctly unpack softlinks.
# Delete and recreate the softlinks pipx expects to have.
rm /opt/pipx/venvs/poetry/bin/python
cd /opt/pipx/venvs/poetry/bin
ln -s "$(which "python$PYTHON_VERSION")" python
chmod +x python
cd /opt/pipx_bin/
ln -s /opt/pipx/venvs/poetry/bin/poetry poetry
chmod +x poetry

# Ensure everything got set up correctly.
/opt/pipx/venvs/poetry/bin/python --version
/opt/pipx_bin/poetry --version

- name: Install poetry
if: steps.cache-bin-poetry.outputs.cache-hit != 'true'
shell: bash
env:
POETRY_VERSION: ${{ inputs.poetry-version }}
PYTHON_VERSION: ${{ inputs.python-version }}
# Install poetry using the python version installed by setup-python step.
run: pipx install "poetry==$POETRY_VERSION" --python '${{ steps.setup-python.outputs.python-path }}' --verbose

- name: Restore pip and poetry cached dependencies
uses: actions/cache@v4
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
with:
path: |
~/.cache/pip
~/.cache/pypoetry/virtualenvs
~/.cache/pypoetry/cache
~/.cache/pypoetry/artifacts
${{ env.WORKDIR }}/.venv
key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/**/poetry.lock', env.WORKDIR)) }}
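The removed action keys its dependency cache on OS, architecture, Python and Poetry versions, plus a hash of the lockfiles. A rough Python sketch of that keying scheme, with the function name and example values chosen here purely for illustration:

```python
import hashlib
from pathlib import Path


def dependency_cache_key(
    os_name: str, arch: str, python: str, poetry: str, workdir: Path
) -> str:
    # Hash every poetry.lock under workdir, mirroring hashFiles('**/poetry.lock').
    digest = hashlib.sha256()
    for lock in sorted(workdir.rglob("poetry.lock")):
        digest.update(lock.read_bytes())
    return f"py-deps-{os_name}-{arch}-py-{python}-poetry-{poetry}-{digest.hexdigest()}"


# e.g. dependency_cache_key("Linux", "X64", "3.11", "1.7.1", Path("libs/core"))
```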
.github/copilot-instructions.md (vendored): 330 lines

@@ -1,330 +0,0 @@
# Global Development Guidelines for LangChain Projects

## Core Development Principles

### 1. Maintain Stable Public Interfaces ⚠️ CRITICAL

**Always attempt to preserve function signatures, argument positions, and names for exported/public methods.**

❌ **Bad - Breaking Change:**

```python
def get_user(id, verbose=False):  # Changed from `user_id`
    pass
```

✅ **Good - Stable Interface:**

```python
def get_user(user_id: str, verbose: bool = False) -> User:
    """Retrieve user by ID with optional verbose output."""
    pass
```

**Before making ANY changes to public APIs:**

- Check if the function/class is exported in `__init__.py`
- Look for existing usage patterns in tests and examples
- Use keyword-only arguments for new parameters: `*, new_param: str = "default"`
- Mark experimental features clearly with docstring admonitions (using MkDocs Material, like `!!! warning`)

🧠 *Ask yourself:* "Would this change break someone's code if they used it last week?"

### 2. Code Quality Standards

**All Python code MUST include type hints and return types.**

❌ **Bad:**

```python
def p(u, d):
    return [x for x in u if x not in d]
```

✅ **Good:**

```python
def filter_unknown_users(users: list[str], known_users: set[str]) -> list[str]:
    """Filter out users that are not in the known users set.

    Args:
        users: List of user identifiers to filter.
        known_users: Set of known/valid user identifiers.

    Returns:
        List of users that are not in the known_users set.
    """
    return [user for user in users if user not in known_users]
```

**Style Requirements:**

- Use descriptive, **self-explanatory variable names**. Avoid overly short or cryptic identifiers.
- Attempt to break up complex functions (>20 lines) into smaller, focused functions where it makes sense
- Avoid unnecessary abstraction or premature optimization
- Follow existing patterns in the codebase you're modifying

### 3. Testing Requirements

**Every new feature or bugfix MUST be covered by unit tests.**

**Test Organization:**

- Unit tests: `tests/unit_tests/` (no network calls allowed)
- Integration tests: `tests/integration_tests/` (network calls permitted)
- Use `pytest` as the testing framework

**Test Quality Checklist:**

- [ ] Tests fail when your new logic is broken
- [ ] Happy path is covered
- [ ] Edge cases and error conditions are tested
- [ ] Use fixtures/mocks for external dependencies
- [ ] Tests are deterministic (no flaky tests)

Checklist questions:

- [ ] Does the test suite fail if your new logic is broken?
- [ ] Are all expected behaviors exercised (happy path, invalid input, etc)?
- [ ] Do tests use fixtures or mocks where needed?

```python
def test_filter_unknown_users():
    """Test filtering unknown users from a list."""
    users = ["alice", "bob", "charlie"]
    known_users = {"alice", "bob"}

    result = filter_unknown_users(users, known_users)

    assert result == ["charlie"]
    assert len(result) == 1
```

### 4. Security and Risk Assessment

**Security Checklist:**

- No `eval()`, `exec()`, or `pickle` on user-controlled input
- Proper exception handling (no bare `except:`) and use a `msg` variable for error messages
- Remove unreachable/commented code before committing
- Race conditions or resource leaks (file handles, sockets, threads).
- Ensure proper resource cleanup (file handles, connections)

❌ **Bad:**

```python
def load_config(path):
    with open(path) as f:
        return eval(f.read())  # ⚠️ Never eval config
```

✅ **Good:**

```python
import json

def load_config(path: str) -> dict:
    with open(path) as f:
        return json.load(f)
```

### 5. Documentation Standards

**Use Google-style docstrings with Args and Returns sections for all public functions.**

❌ **Insufficient Documentation:**

```python
def send_email(to, msg):
    """Send an email to a recipient."""
```

✅ **Complete Documentation:**

```python
def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
    """
    Send an email to a recipient with specified priority.

    Args:
        to: The email address of the recipient.
        msg: The message body to send.
        priority: Email priority level.

    Returns:
        True if email was sent successfully, False otherwise.

    Raises:
        InvalidEmailError: If the email address format is invalid.
        SMTPConnectionError: If unable to connect to email server.
    """
```

**Documentation Guidelines:**

- Types go in function signatures, NOT in docstrings
- Focus on "why" rather than "what" in descriptions
- Document all parameters, return values, and exceptions
- Keep descriptions concise but clear

📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.

### 6. Architectural Improvements

**When you encounter code that could be improved, suggest better designs:**

❌ **Poor Design:**

```python
def process_data(data, db_conn, email_client, logger):
    # Function doing too many things
    validated = validate_data(data)
    result = db_conn.save(validated)
    email_client.send_notification(result)
    logger.log(f"Processed {len(data)} items")
    return result
```

✅ **Better Design:**

```python
@dataclass
class ProcessingResult:
    """Result of data processing operation."""
    items_processed: int
    success: bool
    errors: List[str] = field(default_factory=list)

class DataProcessor:
    """Handles data validation, storage, and notification."""

    def __init__(self, db_conn: Database, email_client: EmailClient):
        self.db = db_conn
        self.email = email_client

    def process(self, data: List[dict]) -> ProcessingResult:
        """Process and store data with notifications.

        Args:
            data: List of data items to process.

        Returns:
            ProcessingResult with details of the operation.
        """
        validated = self._validate_data(data)
        result = self.db.save(validated)
        self._notify_completion(result)
        return result
```

**Design Improvement Areas:**

If there's a **cleaner**, **more scalable**, or **simpler** design, highlight it and suggest improvements that would:

- Reduce code duplication through shared utilities
- Make unit testing easier
- Improve separation of concerns (single responsibility)
- Make unit testing easier through dependency injection
- Add clarity without adding complexity
- Prefer dataclasses for structured data

## Development Tools & Commands

### Package Management

```bash
# Add package
uv add package-name

# Sync project dependencies
uv sync
uv lock
```

### Testing

```bash
# Run unit tests (no network)
make test

# Don't run integration tests, as API keys must be set

# Run specific test file
uv run --group test pytest tests/unit_tests/test_specific.py
```

### Code Quality

```bash
# Lint code
make lint

# Format code
make format

# Type checking
uv run --group lint mypy .
```

### Dependency Management Patterns

**Local Development Dependencies:**

```toml
[tool.uv.sources]
langchain-core = { path = "../core", editable = true }
langchain-tests = { path = "../standard-tests", editable = true }
```

**For tools, use the `@tool` decorator from `langchain_core.tools`:**

```python
from langchain_core.tools import tool

@tool
def search_database(query: str) -> str:
    """Search the database for relevant information.

    Args:
        query: The search query string.
    """
    # Implementation here
    return results
```

## Commit Standards

**Use Conventional Commits format for PR titles:**

- `feat(core): add multi-tenant support`
- `!fix(cli): resolve flag parsing error` (breaking change uses exclamation mark)
- `docs: update API usage examples`
- `docs(openai): update API usage examples`

## Framework-Specific Guidelines

- Follow the existing patterns in `langchain_core` for base abstractions
- Implement proper streaming support where applicable
- Avoid deprecated components

### Partner Integrations

- Follow the established patterns in existing partner libraries
- Implement standard interfaces (`BaseChatModel`, `BaseEmbeddings`, etc.)
- Include comprehensive integration tests
- Document API key requirements and authentication

---

## Quick Reference Checklist

Before submitting code changes:

- [ ] **Breaking Changes**: Verified no public API changes
- [ ] **Type Hints**: All functions have complete type annotations
- [ ] **Tests**: New functionality is fully tested
- [ ] **Security**: No dangerous patterns (eval, silent failures, etc.)
- [ ] **Documentation**: Google-style docstrings for public functions
- [ ] **Code Quality**: `make lint` and `make format` pass
- [ ] **Architecture**: Suggested improvements where applicable
- [ ] **Commit Message**: Follows Conventional Commits format
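As a companion to the interface-stability rule in the removed guidelines, a minimal sketch of the keyword-only-argument pattern it recommends; `get_user`, `User`, and `include_profile` are illustrative names, not LangChain APIs:

```python
from dataclasses import dataclass


@dataclass
class User:
    user_id: str
    profile: dict | None = None


def get_user(user_id: str, verbose: bool = False, *, include_profile: bool = False) -> User:
    """Fetch a user; the new `include_profile` flag is keyword-only.

    Existing positional call sites like get_user("u-123") keep working,
    so adding the parameter is not a breaking change.
    """
    profile = {"plan": "free"} if include_profile else None
    return User(user_id=user_id, profile=profile)


user = get_user("u-123", include_profile=True)  # new behavior is opt-in
```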
.github/pr-file-labeler.yml (vendored): 96 changed lines

@@ -7,13 +7,12 @@ core:
- any-glob-to-any-file:
- "libs/core/**/*"

langchain:
langchain-classic:
- changed-files:
- any-glob-to-any-file:
- "libs/langchain/**/*"
- "libs/langchain_v1/**/*"

v1:
langchain:
- changed-files:
- any-glob-to-any-file:
- "libs/langchain_v1/**/*"
@@ -28,6 +27,11 @@ standard-tests:
- any-glob-to-any-file:
- "libs/standard-tests/**/*"

model-profiles:
- changed-files:
- any-glob-to-any-file:
- "libs/model-profiles/**/*"

text-splitters:
- changed-files:
- any-glob-to-any-file:
@@ -39,6 +43,81 @@ integration:
- any-glob-to-any-file:
- "libs/partners/**/*"

anthropic:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/anthropic/**/*"

chroma:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/chroma/**/*"

deepseek:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/deepseek/**/*"

exa:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/exa/**/*"

fireworks:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/fireworks/**/*"

groq:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/groq/**/*"

huggingface:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/huggingface/**/*"

mistralai:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/mistralai/**/*"

nomic:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/nomic/**/*"

ollama:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/ollama/**/*"

openai:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/openai/**/*"

perplexity:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/perplexity/**/*"

prompty:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/prompty/**/*"

qdrant:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/qdrant/**/*"

xai:
- changed-files:
- any-glob-to-any-file:
- "libs/partners/xai/**/*"

# Infrastructure and DevOps
infra:
- changed-files:
@@ -69,16 +148,5 @@ documentation:
- changed-files:
- any-glob-to-any-file:
- "**/*.md"
- "**/*.rst"
- "**/README*"

# Security related changes
security:
- changed-files:
- any-glob-to-any-file:
- "**/*security*"
- "**/*auth*"
- "**/*credential*"
- "**/*secret*"
- "**/*token*"
- ".github/workflows/security*"
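A rough Python sketch of what the file labeler does with rules like these: map changed paths to labels. Real `**/*` globs are richer than this prefix check, and the rule excerpt below is illustrative:

```python
def labels_for(changed_files: list[str]) -> set[str]:
    # Prefix-based approximation of the any-glob-to-any-file rules above.
    rules = {
        "core": "libs/core/",
        "anthropic": "libs/partners/anthropic/",
        "text-splitters": "libs/text-splitters/",
    }
    return {
        label
        for label, prefix in rules.items()
        for path in changed_files
        if path.startswith(prefix)
    }


assert labels_for(["libs/core/runnables.py"]) == {"core"}
```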
.github/pr-title-labeler.yml (vendored): 41 lines

@@ -1,41 +0,0 @@
# PR title labeler config
#
# Labels PRs based on conventional commit patterns in titles
#
# Format: type(scope): description or type!: description (breaking)

add-missing-labels: true
clear-prexisting: false
include-commits: false
include-title: true
label-for-breaking-changes: breaking

label-mapping:
documentation: ["docs"]
feature: ["feat"]
fix: ["fix"]
infra: ["build", "ci", "chore"]
integration:
[
"anthropic",
"chroma",
"deepseek",
"exa",
"fireworks",
"groq",
"huggingface",
"mistralai",
"nomic",
"ollama",
"openai",
"perplexity",
"prompty",
"qdrant",
"xai",
]
linting: ["style"]
performance: ["perf"]
refactor: ["refactor"]
release: ["release"]
revert: ["revert"]
tests: ["test"]
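A hedged sketch of the title parsing this config implies (conventional commits, with `!` marking breaking changes); the regex and the mapping excerpt are illustrative, not the labeler's actual implementation:

```python
import re

TITLE_RE = re.compile(r"^(?P<type>[a-z]+)(?:\((?P<scope>[^)]+)\))?(?P<breaking>!)?: .+$")
LABEL_MAPPING = {"documentation": ["docs"], "feature": ["feat"], "fix": ["fix"]}


def labels_for_title(title: str) -> list[str]:
    match = TITLE_RE.match(title)
    if match is None:
        return []
    labels = [
        label
        for label, types in LABEL_MAPPING.items()
        if match.group("type") in types
    ]
    if match.group("breaking"):
        labels.append("breaking")  # label-for-breaking-changes: breaking
    return labels


assert labels_for_title("feat(core)!: drop Python 3.9") == ["feature", "breaking"]
```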
.github/scripts/check_diff.py (vendored): 48 changed lines

@@ -30,6 +30,7 @@ LANGCHAIN_DIRS = [
    "libs/text-splitters",
    "libs/langchain",
    "libs/langchain_v1",
    "libs/model-profiles",
]

# When set to True, we are ignoring core dependents
@@ -50,10 +51,6 @@ IGNORED_PARTNERS = [
    "prompty",
]

PY_312_MAX_PACKAGES = [
    "libs/partners/chroma",  # https://github.com/chroma-core/chroma/issues/4382
]


def all_package_dirs() -> Set[str]:
    return {
@@ -134,32 +131,20 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
        return _get_pydantic_test_configs(dir_)

    if job == "codspeed":
        py_versions = ["3.12"]  # 3.13 is not yet supported
        py_versions = ["3.13"]
    elif dir_ == "libs/core":
        py_versions = ["3.10", "3.11", "3.12", "3.13"]
        py_versions = ["3.10", "3.11", "3.12", "3.13", "3.14"]
    # custom logic for specific directories

    elif dir_ in PY_312_MAX_PACKAGES:
        py_versions = ["3.10", "3.12"]

    elif dir_ == "libs/langchain" and job == "extended-tests":
    elif dir_ in {"libs/partners/chroma"}:
        py_versions = ["3.10", "3.13"]
    elif dir_ == "libs/langchain_v1":
        py_versions = ["3.10", "3.13"]
    elif dir_ in {"libs/cli"}:
        py_versions = ["3.10", "3.13"]

    elif dir_ == ".":
        # unable to install with 3.13 because tokenizers doesn't support 3.13 yet
        py_versions = ["3.10", "3.12"]
    else:
        py_versions = ["3.10", "3.13"]
        py_versions = ["3.10", "3.14"]

    return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]


def _get_pydantic_test_configs(
    dir_: str, *, python_version: str = "3.11"
    dir_: str, *, python_version: str = "3.12"
) -> List[Dict[str, str]]:
    with open("./libs/core/uv.lock", "rb") as f:
        core_uv_lock_data = tomllib.load(f)
@@ -264,7 +249,15 @@ if __name__ == "__main__":
                ".github/scripts/check_diff.py",
            )
        ):
            # add all LANGCHAIN_DIRS for infra changes
            # Infrastructure changes (workflows, actions, CI scripts) trigger tests on
            # all core packages as a safety measure. This ensures that changes to CI/CD
            # infrastructure don't inadvertently break package testing, even if the change
            # appears unrelated (e.g., documentation build workflows). This is intentionally
            # conservative to catch unexpected side effects from workflow modifications.
            #
            # Example: A PR modifying .github/workflows/api_doc_build.yml will trigger
            # lint/test jobs for libs/core, libs/text-splitters, libs/langchain, and
            # libs/langchain_v1, even though the workflow may only affect documentation.
            dirs_to_run["extended-test"].update(LANGCHAIN_DIRS)

        if file.startswith("libs/core"):
@@ -287,8 +280,6 @@ if __name__ == "__main__":
            # Note: won't run on external repo partners
            dirs_to_run["lint"].add("libs/standard-tests")
            dirs_to_run["test"].add("libs/standard-tests")
            dirs_to_run["lint"].add("libs/cli")
            dirs_to_run["test"].add("libs/cli")
            dirs_to_run["test"].add("libs/partners/mistralai")
            dirs_to_run["test"].add("libs/partners/openai")
            dirs_to_run["test"].add("libs/partners/anthropic")
@@ -307,9 +298,16 @@ if __name__ == "__main__":
                if not filename.startswith(".")
            ] != ["README.md"]:
                dirs_to_run["test"].add(f"libs/partners/{partner_dir}")
                dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
                # Skip codspeed for partners without benchmarks or in IGNORED_PARTNERS
                if partner_dir not in IGNORED_PARTNERS:
                    dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
            # Skip if the directory was deleted or is just a tombstone readme
        elif file.startswith("libs/"):
            # Check if this is a root-level file in libs/ (e.g., libs/README.md)
            file_parts = file.split("/")
            if len(file_parts) == 2:
                # Root-level file in libs/, skip it (no tests needed)
                continue
            raise ValueError(
                f"Unknown lib: {file}. check_diff.py likely needs "
                "an update for this new library!"
7 .github/scripts/get_min_versions.py vendored
@@ -2,7 +2,6 @@

import sys
from collections import defaultdict
from typing import Optional

if sys.version_info >= (3, 11):
    import tomllib
@@ -54,7 +53,7 @@ def get_pypi_versions(package_name: str) -> List[str]:
    return list(response.json()["releases"].keys())


def get_minimum_version(package_name: str, spec_string: str) -> Optional[str]:
def get_minimum_version(package_name: str, spec_string: str) -> str | None:
    """Find the minimum published version that satisfies the given constraints.

    Args:
@@ -99,7 +98,7 @@ def _check_python_version_from_requirement(
        return True
    else:
        marker_str = str(requirement.marker)
        if "python_version" or "python_full_version" in marker_str:
        if "python_version" in marker_str or "python_full_version" in marker_str:
            python_version_str = "".join(
                char
                for char in marker_str
@@ -114,7 +113,7 @@ def get_min_version_from_toml(
    versions_for: str,
    python_version: str,
    *,
    include: Optional[list] = None,
    include: list | None = None,
):
    # Parse the TOML file
    with open(toml_path, "rb") as file:
14 .github/workflows/_compile_integration_test.yml vendored
@@ -6,7 +6,7 @@
#
# Runs pytest with compile marker to check syntax/imports.

name: '🔗 Compile Integration Tests'
name: "🔗 Compile Integration Tests"

on:
  workflow_call:
@@ -33,26 +33,26 @@ jobs:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    timeout-minutes: 20
    name: 'Python ${{ inputs.python-version }}'
    name: "Python ${{ inputs.python-version }}"
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
      - name: "🐍 Set up Python ${{ inputs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
          cache-suffix: compile-integration-tests-${{ inputs.working-directory }}
          working-directory: ${{ inputs.working-directory }}

      - name: '📦 Install Integration Dependencies'
      - name: "📦 Install Integration Dependencies"
        shell: bash
        run: uv sync --group test --group test_integration

      - name: '🔗 Check Integration Tests Compile'
      - name: "🔗 Check Integration Tests Compile"
        shell: bash
        run: uv run pytest -m compile tests/integration_tests

      - name: '🧹 Verify Clean Working Directory'
      - name: "🧹 Verify Clean Working Directory"
        shell: bash
        run: |
          set -eu
26 .github/workflows/_lint.yml vendored
@@ -5,7 +5,7 @@
#
# Called as part of check_diffs.yml workflow.

name: '🧹 Linting'
name: "🧹 Linting"

on:
  workflow_call:
@@ -33,43 +33,49 @@ env:
jobs:
  # Linting job - runs quality checks on package and test code
  build:
    name: 'Python ${{ inputs.python-version }}'
    name: "Python ${{ inputs.python-version }}"
    runs-on: ubuntu-latest
    timeout-minutes: 20
    steps:
      - name: '📋 Checkout Code'
        uses: actions/checkout@v5
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6

      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
      - name: "🐍 Set up Python ${{ inputs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
          cache-suffix: lint-${{ inputs.working-directory }}
          working-directory: ${{ inputs.working-directory }}

      - name: '📦 Install Lint & Typing Dependencies'
      # - name: "🔒 Verify Lockfile is Up-to-Date"
      #   working-directory: ${{ inputs.working-directory }}
      #   run: |
      #     unset UV_FROZEN
      #     uv lock --check

      - name: "📦 Install Lint & Typing Dependencies"
        working-directory: ${{ inputs.working-directory }}
        run: |
          uv sync --group lint --group typing

      - name: '🔍 Analyze Package Code with Linters'
      - name: "🔍 Analyze Package Code with Linters"
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint_package

      - name: '📦 Install Test Dependencies (non-partners)'
      - name: "📦 Install Test Dependencies (non-partners)"
        # (For directories NOT starting with libs/partners/)
        if: ${{ ! startsWith(inputs.working-directory, 'libs/partners/') }}
        working-directory: ${{ inputs.working-directory }}
        run: |
          uv sync --inexact --group test
      - name: '📦 Install Test Dependencies'
      - name: "📦 Install Test Dependencies"
        if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
        working-directory: ${{ inputs.working-directory }}
        run: |
          uv sync --inexact --group test --group test_integration

      - name: '🔍 Analyze Test Code with Linters'
      - name: "🔍 Analyze Test Code with Linters"
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint_tests
40 .github/workflows/_release.yml vendored
@@ -19,7 +19,7 @@ on:
        required: true
        type: string
        description: "From which folder this pipeline executes"
        default: "libs/langchain"
        default: "libs/langchain_v1"
      release-version:
        required: true
        type: string
@@ -54,7 +54,7 @@ jobs:
      version: ${{ steps.check-version.outputs.version }}

    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      - name: Set up Python + uv
        uses: "./.github/actions/uv_setup"
@@ -77,7 +77,7 @@ jobs:
        working-directory: ${{ inputs.working-directory }}

      - name: Upload build
        uses: actions/upload-artifact@v4
        uses: actions/upload-artifact@v6
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/
@@ -105,7 +105,7 @@ jobs:
    outputs:
      release-body: ${{ steps.generate-release-body.outputs.release-body }}
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6
        with:
          repository: langchain-ai/langchain
          path: langchain
@@ -149,8 +149,8 @@ jobs:
            fi
          fi

          # if PREV_TAG is empty, let it be empty
          if [ -z "$PREV_TAG" ]; then
          # if PREV_TAG is empty or came out to 0.0.0, let it be empty
          if [ -z "$PREV_TAG" ] || [ "$PREV_TAG" = "$PKG_NAME==0.0.0" ]; then
            echo "No previous tag found - first release"
          else
            # confirm prev-tag actually exists in git repo with git tag
@@ -179,8 +179,8 @@ jobs:
          PREV_TAG: ${{ steps.check-tags.outputs.prev-tag }}
        run: |
          PREAMBLE="Changes since $PREV_TAG"
          # if PREV_TAG is empty, then we are releasing the first version
          if [ -z "$PREV_TAG" ]; then
          # if PREV_TAG is empty or 0.0.0, then we are releasing the first version
          if [ -z "$PREV_TAG" ] || [ "$PREV_TAG" = "$PKG_NAME==0.0.0" ]; then
            PREAMBLE="Initial release"
            PREV_TAG=$(git rev-list --max-parents=0 HEAD)
          fi
@@ -206,9 +206,9 @@ jobs:
      id-token: write

    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      - uses: actions/download-artifact@v5
      - uses: actions/download-artifact@v7
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/
@@ -237,7 +237,7 @@ jobs:
      contents: read
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      # We explicitly *don't* set up caching here. This ensures our tests are
      # maximally sensitive to catching breakage.
@@ -258,7 +258,7 @@ jobs:
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - uses: actions/download-artifact@v5
      - uses: actions/download-artifact@v7
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/
@@ -377,6 +377,7 @@ jobs:
          XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
          DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
          PPLX_API_KEY: ${{ secrets.PPLX_API_KEY }}
          LANGCHAIN_TESTS_USER_AGENT: ${{ secrets.LANGCHAIN_TESTS_USER_AGENT }}
        run: make integration_tests
        working-directory: ${{ inputs.working-directory }}

@@ -409,8 +410,9 @@ jobs:
      AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME }}
      AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
      AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
      LANGCHAIN_TESTS_USER_AGENT: ${{ secrets.LANGCHAIN_TESTS_USER_AGENT }}
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      # We implement this conditional as Github Actions does not have good support
      # for conditionally needing steps. https://github.com/actions/runner/issues/491
@@ -428,7 +430,7 @@ jobs:
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - uses: actions/download-artifact@v5
      - uses: actions/download-artifact@v7
        if: startsWith(inputs.working-directory, 'libs/core')
        with:
          name: dist
@@ -442,7 +444,7 @@ jobs:
            git ls-remote --tags origin "langchain-${{ matrix.partner }}*" \
              | awk '{print $2}' \
              | sed 's|refs/tags/||' \
              | grep -E '[0-9]+\.[0-9]+\.[0-9]+([a-zA-Z]+[0-9]+)?$' \
              | grep -E '[0-9]+\.[0-9]+\.[0-9]+$' \
              | sort -Vr \
              | head -n 1
          )"
@@ -490,14 +492,14 @@ jobs:
      working-directory: ${{ inputs.working-directory }}

    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      - name: Set up Python + uv
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - uses: actions/download-artifact@v5
      - uses: actions/download-artifact@v7
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/
@@ -530,14 +532,14 @@ jobs:
      working-directory: ${{ inputs.working-directory }}

    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      - name: Set up Python + uv
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - uses: actions/download-artifact@v5
      - uses: actions/download-artifact@v7
        with:
          name: dist
          path: ${{ inputs.working-directory }}/dist/
21 .github/workflows/_test.yml vendored
@@ -1,7 +1,7 @@
# Runs unit tests with both current and minimum supported dependency versions
# to ensure compatibility across the supported range.

name: '🧪 Unit Testing'
name: "🧪 Unit Testing"

on:
  workflow_call:
@@ -30,12 +30,12 @@ jobs:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    timeout-minutes: 20
    name: 'Python ${{ inputs.python-version }}'
    name: "Python ${{ inputs.python-version }}"
    steps:
      - name: '📋 Checkout Code'
        uses: actions/checkout@v5
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6

      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
      - name: "🐍 Set up Python ${{ inputs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
        id: setup-python
        with:
@@ -43,16 +43,16 @@ jobs:
          cache-suffix: test-${{ inputs.working-directory }}
          working-directory: ${{ inputs.working-directory }}

      - name: '📦 Install Test Dependencies'
      - name: "📦 Install Test Dependencies"
        shell: bash
        run: uv sync --group test --dev

      - name: '🧪 Run Core Unit Tests'
      - name: "🧪 Run Core Unit Tests"
        shell: bash
        run: |
          make test

      - name: '🔍 Calculate Minimum Dependency Versions'
      - name: "🔍 Calculate Minimum Dependency Versions"
        working-directory: ${{ inputs.working-directory }}
        id: min-version
        shell: bash
@@ -63,7 +63,7 @@ jobs:
          echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
          echo "min-versions=$min_versions"

      - name: '🧪 Run Tests with Minimum Dependencies'
      - name: "🧪 Run Tests with Minimum Dependencies"
        if: ${{ steps.min-version.outputs.min-versions != '' }}
        env:
          MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
@@ -72,7 +72,7 @@ jobs:
          make tests
        working-directory: ${{ inputs.working-directory }}

      - name: '🧹 Verify Clean Working Directory'
      - name: "🧹 Verify Clean Working Directory"
        shell: bash
        run: |
          set -eu
@@ -83,4 +83,3 @@ jobs:
          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
24 .github/workflows/_test_pydantic.yml vendored
@@ -1,6 +1,6 @@
# Facilitate unit testing against different Pydantic versions for a provided package.

name: '🐍 Pydantic Version Testing'
name: "🐍 Pydantic Version Testing"

on:
  workflow_call:
@@ -13,7 +13,7 @@ on:
        required: false
        type: string
        description: "Python version to use"
        default: "3.11"
        default: "3.12"
      pydantic-version:
        required: true
        type: string
@@ -33,32 +33,34 @@ jobs:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    timeout-minutes: 20
    name: 'Pydantic ~=${{ inputs.pydantic-version }}'
    name: "Pydantic ~=${{ inputs.pydantic-version }}"
    steps:
      - name: '📋 Checkout Code'
        uses: actions/checkout@v5
      - name: "📋 Checkout Code"
        uses: actions/checkout@v6

      - name: '🐍 Set up Python ${{ inputs.python-version }} + UV'
      - name: "🐍 Set up Python ${{ inputs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ inputs.python-version }}
          cache-suffix: test-pydantic-${{ inputs.working-directory }}
          working-directory: ${{ inputs.working-directory }}

      - name: '📦 Install Test Dependencies'
      - name: "📦 Install Test Dependencies"
        shell: bash
        run: uv sync --group test

      - name: '🔄 Install Specific Pydantic Version'
      - name: "🔄 Install Specific Pydantic Version"
        shell: bash
        run: VIRTUAL_ENV=.venv uv pip install pydantic~=${{ inputs.pydantic-version }}
        env:
          PYDANTIC_VERSION: ${{ inputs.pydantic-version }}
        run: VIRTUAL_ENV=.venv uv pip install "pydantic~=$PYDANTIC_VERSION"

      - name: '🧪 Run Core Tests'
      - name: "🧪 Run Core Tests"
        shell: bash
        run: |
          make test

      - name: '🧹 Verify Clean Working Directory'
      - name: "🧹 Verify Clean Working Directory"
        shell: bash
        run: |
          set -eu
107 .github/workflows/auto-label-by-package.yml vendored Normal file
@@ -0,0 +1,107 @@
name: Auto Label Issues by Package

on:
  issues:
    types: [opened, edited]

jobs:
  label-by-package:
    permissions:
      issues: write
    runs-on: ubuntu-latest

    steps:
      - name: Sync package labels
        uses: actions/github-script@v8
        with:
          script: |
            const body = context.payload.issue.body || "";

            // Extract text under "### Package"
            const match = body.match(/### Package\s+([\s\S]*?)\n###/i);
            if (!match) return;

            const packageSection = match[1].trim();

            // Mapping table for package names to labels
            const mapping = {
              "langchain": "langchain",
              "langchain-openai": "openai",
              "langchain-anthropic": "anthropic",
              "langchain-classic": "langchain-classic",
              "langchain-core": "core",
              "langchain-cli": "cli",
              "langchain-model-profiles": "model-profiles",
              "langchain-tests": "standard-tests",
              "langchain-text-splitters": "text-splitters",
              "langchain-chroma": "chroma",
              "langchain-deepseek": "deepseek",
              "langchain-exa": "exa",
              "langchain-fireworks": "fireworks",
              "langchain-groq": "groq",
              "langchain-huggingface": "huggingface",
              "langchain-mistralai": "mistralai",
              "langchain-nomic": "nomic",
              "langchain-ollama": "ollama",
              "langchain-perplexity": "perplexity",
              "langchain-prompty": "prompty",
              "langchain-qdrant": "qdrant",
              "langchain-xai": "xai",
            };

            // All possible package labels we manage
            const allPackageLabels = Object.values(mapping);
            const selectedLabels = [];

            // Check if this is checkbox format (multiple selection)
            const checkboxMatches = packageSection.match(/- \[x\]\s+([^\n\r]+)/gi);
            if (checkboxMatches) {
              // Handle checkbox format
              for (const match of checkboxMatches) {
                const packageName = match.replace(/- \[x\]\s+/i, '').trim();
                const label = mapping[packageName];
                if (label && !selectedLabels.includes(label)) {
                  selectedLabels.push(label);
                }
              }
            } else {
              // Handle dropdown format (single selection)
              const label = mapping[packageSection];
              if (label) {
                selectedLabels.push(label);
              }
            }

            // Get current issue labels
            const issue = await github.rest.issues.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });

            const currentLabels = issue.data.labels.map(label => label.name);
            const currentPackageLabels = currentLabels.filter(label => allPackageLabels.includes(label));

            // Determine labels to add and remove
            const labelsToAdd = selectedLabels.filter(label => !currentPackageLabels.includes(label));
            const labelsToRemove = currentPackageLabels.filter(label => !selectedLabels.includes(label));

            // Add new labels
            if (labelsToAdd.length > 0) {
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                labels: labelsToAdd
              });
            }

            // Remove old labels
            for (const label of labelsToRemove) {
              await github.rest.issues.removeLabel({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                name: label
              });
            }
10 .github/workflows/check_core_versions.yml vendored
@@ -2,13 +2,13 @@
#
# (Prevents releases with mismatched version numbers)

name: '🔍 Check Version Equality'
name: "🔍 Check Version Equality"

on:
  pull_request:
    paths:
      - 'libs/core/pyproject.toml'
      - 'libs/core/langchain_core/version.py'
      - "libs/core/pyproject.toml"
      - "libs/core/langchain_core/version.py"

permissions:
  contents: read
@@ -18,9 +18,9 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      - name: '✅ Verify pyproject.toml & version.py Match'
      - name: "✅ Verify pyproject.toml & version.py Match"
        run: |
          # Check core versions
          CORE_PYPROJECT_VERSION=$(grep -Po '(?<=^version = ")[^"]*' libs/core/pyproject.toml)
13 .github/workflows/check_diffs.yml vendored
@@ -47,7 +47,7 @@ jobs:
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'ci-ignore') }}
    steps:
      - name: "📋 Checkout Code"
        uses: actions/checkout@v5
        uses: actions/checkout@v6
      - name: "🐍 Setup Python 3.11"
        uses: actions/setup-python@v6
        with:
@@ -141,7 +141,7 @@ jobs:
      run:
        working-directory: ${{ matrix.job-configs.working-directory }}
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      - name: "🐍 Set up Python ${{ matrix.job-configs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
@@ -182,17 +182,16 @@ jobs:
        job-configs: ${{ fromJson(needs.build.outputs.codspeed) }}
      fail-fast: false
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6

      # We have to use 3.12 as 3.13 is not yet supported
      - name: "📦 Install UV Package Manager"
        uses: astral-sh/setup-uv@v6
        uses: astral-sh/setup-uv@v7
        with:
          python-version: "3.12"
          python-version: "3.13"

      - uses: actions/setup-python@v6
        with:
          python-version: "3.12"
          python-version: "3.13"

      - name: "📦 Install Test Dependencies"
        run: uv sync --group test
30 .github/workflows/integration_tests.yml vendored
@@ -23,10 +23,8 @@ permissions:
  contents: read

env:
  POETRY_VERSION: "1.8.4"
  UV_FROZEN: "true"
  DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
  POETRY_LIBS: ("libs/partners/aws")

jobs:
  # Generate dynamic test matrix based on input parameters or defaults
@@ -60,7 +58,6 @@ jobs:
          echo $matrix
          echo "matrix=$matrix" >> $GITHUB_OUTPUT
  # Run integration tests against partner libraries with live API credentials
  # Tests are run with Poetry or UV depending on the library's setup
  build:
    if: github.repository_owner == 'langchain-ai' || github.event_name != 'schedule'
    name: "🐍 Python ${{ matrix.python-version }}: ${{ matrix.working-directory }}"
@@ -74,14 +71,14 @@ jobs:
        working-directory: ${{ fromJSON(needs.compute-matrix.outputs.matrix).working-directory }}

    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6
        with:
          path: langchain
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6
        with:
          repository: langchain-ai/langchain-google
          path: langchain-google
      - uses: actions/checkout@v5
      - uses: actions/checkout@v6
        with:
          repository: langchain-ai/langchain-aws
          path: langchain-aws
@@ -95,17 +92,7 @@ jobs:
          mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
          mv langchain-aws/libs/aws langchain/libs/partners/aws

      - name: "🐍 Set up Python ${{ matrix.python-version }} + Poetry"
        if: contains(env.POETRY_LIBS, matrix.working-directory)
        uses: "./langchain/.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: langchain/${{ matrix.working-directory }}
          cache-key: scheduled

      - name: "🐍 Set up Python ${{ matrix.python-version }} + UV"
        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
        uses: "./langchain/.github/actions/uv_setup"
        with:
          python-version: ${{ matrix.python-version }}
@@ -123,15 +110,7 @@ jobs:
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}

      - name: "📦 Install Dependencies (Poetry)"
        if: contains(env.POETRY_LIBS, matrix.working-directory)
        run: |
          echo "Running scheduled tests, installing dependencies with poetry..."
          cd langchain/${{ matrix.working-directory }}
          poetry install --with=test_integration,test

      - name: "📦 Install Dependencies (UV)"
        if: "!contains(env.POETRY_LIBS, matrix.working-directory)"
      - name: "📦 Install Dependencies"
        run: |
          echo "Running scheduled tests, installing dependencies with uv..."
          cd langchain/${{ matrix.working-directory }}
@@ -176,6 +155,7 @@ jobs:
          WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
          WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
          XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
          LANGCHAIN_TESTS_USER_AGENT: ${{ secrets.LANGCHAIN_TESTS_USER_AGENT }}
        run: |
          cd langchain/${{ matrix.working-directory }}
          make integration_tests
14 .github/workflows/pr_labeler_file.yml vendored
@@ -12,7 +12,7 @@ on:

jobs:
  labeler:
    name: 'label'
    name: "label"
    permissions:
      contents: read
      pull-requests: write
@@ -20,9 +20,9 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Label Pull Request
        uses: actions/labeler@v6
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          configuration-path: .github/pr-file-labeler.yml
          sync-labels: false
      - name: Label Pull Request
        uses: actions/labeler@v6
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          configuration-path: .github/pr-file-labeler.yml
          sync-labels: false
34 .github/workflows/pr_labeler_title.yml vendored
@@ -1,6 +1,7 @@
# Label PRs based on their titles.
#
# See `.github/pr-title-labeler.yml` to see rules for each label/title pattern.
# Uses conventional commit types from PR titles to apply labels.
# Note: Scope-based labeling (e.g., integration labels) is handled by pr_labeler_file.yml

name: "🏷️ PR Title Labeler"

@@ -8,11 +9,11 @@ on:
  # Safe since we're not checking out or running the PR's code
  # Never check out the PR's head in a pull_request_target job
  pull_request_target:
    types: [opened, synchronize, reopened, edited]
    types: [opened, edited]

jobs:
  pr-title-labeler:
    name: 'label'
    name: "label"
    permissions:
      contents: read
      pull-requests: write
@@ -20,9 +21,24 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Label PR based on title
        # Archived repo; latest commit (v0.1.0)
        uses: grafana/pr-labeler-action@f19222d3ef883d2ca5f04420fdfe8148003763f0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          configuration-path: .github/pr-title-labeler.yml
      - name: Label PR based on title
        uses: bcoe/conventional-release-labels@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          type_labels: >-
            {
              "feat": "feature",
              "fix": "fix",
              "docs": "documentation",
              "style": "linting",
              "refactor": "refactor",
              "perf": "performance",
              "test": "tests",
              "build": "infra",
              "ci": "infra",
              "chore": "infra",
              "revert": "revert",
              "release": "release",
              "breaking": "breaking"
            }
          ignored_types: '[]'
17 .github/workflows/pr_lint.yml vendored
@@ -26,11 +26,13 @@
# * revert — reverts a previous commit
# * release — prepare a new release
#
# Allowed Scopes (optional):
#   core, cli, langchain, langchain_v1, langchain_legacy, standard-tests,
#   text-splitters, docs, anthropic, chroma, deepseek, exa, fireworks, groq,
#   huggingface, mistralai, nomic, ollama, openai, perplexity, prompty, qdrant,
#   xai, infra
# Allowed Scope(s) (optional):
#   core, cli, langchain, langchain_v1, langchain-classic, model-profiles,
#   standard-tests, text-splitters, docs, anthropic, chroma, deepseek, exa,
#   fireworks, groq, huggingface, mistralai, nomic, ollama, openai,
#   perplexity, prompty, qdrant, xai, infra, deps
#
# Multiple scopes can be used by separating them with a comma.
#
# Rules:
# 1. The 'Type' must start with a lowercase letter.
@@ -79,8 +81,8 @@ jobs:
            core
            cli
            langchain
            langchain_v1
            langchain_legacy
            langchain-classic
            model-profiles
            standard-tests
            text-splitters
            docs
@@ -100,6 +102,7 @@ jobs:
            qdrant
            xai
            infra
            deps
          requireScope: false
          disallowScopes: |
            release
164 .github/workflows/v03_api_doc_build.yml vendored Normal file
@@ -0,0 +1,164 @@
# Build the API reference documentation for v0.3 branch.
#
# Manual trigger only.
#
# Built HTML pushed to langchain-ai/langchain-api-docs-html.
#
# Looks for langchain-ai org repos in packages.yml and checks them out.
# Calls prep_api_docs_build.py.

name: "📚 API Docs (v0.3)"
run-name: "Build & Deploy API Reference (v0.3)"

on:
  workflow_dispatch:

env:
  PYTHON_VERSION: "3.11"

jobs:
  build:
    if: github.repository == 'langchain-ai/langchain' || github.event_name != 'schedule'
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v6
        with:
          ref: v0.3
          path: langchain

      - uses: actions/checkout@v6
        with:
          repository: langchain-ai/langchain-api-docs-html
          path: langchain-api-docs-html
          token: ${{ secrets.TOKEN_GITHUB_API_DOCS_HTML }}

      - name: "📋 Extract Repository List with yq"
        id: get-unsorted-repos
        uses: mikefarah/yq@master
        with:
          cmd: |
            # Extract repos from packages.yml that are in the langchain-ai org
            # (excluding 'langchain' itself)
            yq '
              .packages[]
              | select(
                  (
                    (.repo | test("^langchain-ai/"))
                    and
                    (.repo != "langchain-ai/langchain")
                  )
                  or
                  (.include_in_api_ref // false)
                )
              | .repo
            ' langchain/libs/packages.yml

      - name: "📋 Parse YAML & Checkout Repositories"
        env:
          REPOS_UNSORTED: ${{ steps.get-unsorted-repos.outputs.result }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Get unique repositories
          REPOS=$(echo "$REPOS_UNSORTED" | sort -u)
          # Checkout each unique repository
          for repo in $REPOS; do
            # Validate repository format (allow any org with proper format)
            if [[ ! "$repo" =~ ^[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+$ ]]; then
              echo "Error: Invalid repository format: $repo"
              exit 1
            fi

            REPO_NAME=$(echo $repo | cut -d'/' -f2)

            # Additional validation for repo name
            if [[ ! "$REPO_NAME" =~ ^[a-zA-Z0-9_.-]+$ ]]; then
              echo "Error: Invalid repository name: $REPO_NAME"
              exit 1
            fi
            echo "Checking out $repo to $REPO_NAME"

            # Special handling for langchain-tavily: checkout by commit hash
            if [[ "$REPO_NAME" == "langchain-tavily" ]]; then
              git clone https://github.com/$repo.git $REPO_NAME
              cd $REPO_NAME
              git checkout f3515654724a9e87bdfe2c2f509d6cdde646e563
              cd ..
            else
              git clone --depth 1 --branch v0.3 https://github.com/$repo.git $REPO_NAME
            fi
          done

      - name: "🐍 Setup Python ${{ env.PYTHON_VERSION }}"
        uses: actions/setup-python@v6
        id: setup-python
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: "📦 Install Initial Python Dependencies using uv"
        working-directory: langchain
        run: |
          python -m pip install -U uv
          python -m uv pip install --upgrade --no-cache-dir pip setuptools pyyaml

      - name: "📦 Organize Library Directories"
        # Places cloned partner packages into libs/partners structure
        run: python langchain/.github/scripts/prep_api_docs_build.py

      - name: "🧹 Clear Prior Build"
        run:
          # Remove artifacts from prior docs build
          rm -rf langchain-api-docs-html/api_reference_build/html

      - name: "📦 Install Documentation Dependencies using uv"
        working-directory: langchain
        run: |
          # Install all partner packages in editable mode with overrides
          python -m uv pip install $(ls ./libs/partners | grep -v azure-ai | xargs -I {} echo "./libs/partners/{}") --overrides ./docs/vercel_overrides.txt --prerelease=allow

          # Install langchain-azure-ai with tools extra
          python -m uv pip install "./libs/partners/azure-ai[tools]" --overrides ./docs/vercel_overrides.txt --prerelease=allow

          # Install core langchain and other main packages
          python -m uv pip install libs/core libs/langchain libs/text-splitters libs/community libs/experimental libs/standard-tests

          # Install Sphinx and related packages for building docs
          python -m uv pip install -r docs/api_reference/requirements.txt

      - name: "🔧 Configure Git Settings"
        working-directory: langchain
        run: |
          git config --local user.email "actions@github.com"
          git config --local user.name "Github Actions"

      - name: "📚 Build API Documentation"
        working-directory: langchain
        run: |
          # Generate the API reference RST files
          python docs/api_reference/create_api_rst.py

          # Build the HTML documentation using Sphinx
          # -T: show full traceback on exception
          # -E: don't use cached environment (force rebuild, ignore cached doctrees)
          # -b html: build HTML docs (vs PDS, etc.)
          # -d: path for the cached environment (parsed document trees / doctrees)
          #     - Separate from output dir for faster incremental builds
          # -c: path to conf.py
          # -j auto: parallel build using all available CPU cores
          python -m sphinx -T -E -b html -d ../langchain-api-docs-html/_build/doctrees -c docs/api_reference docs/api_reference ../langchain-api-docs-html/api_reference_build/html -j auto

          # Post-process the generated HTML
          python docs/api_reference/scripts/custom_formatter.py ../langchain-api-docs-html/api_reference_build/html

          # Default index page is blank so we copy in the actual home page.
          cp ../langchain-api-docs-html/api_reference_build/html/{reference,index}.html

          # Removes Sphinx's intermediate build artifacts after the build is complete.
          rm -rf ../langchain-api-docs-html/_build/

      # Commit and push changes to langchain-api-docs-html repo
      - uses: EndBug/add-and-commit@v9
        with:
          cwd: langchain-api-docs-html
          message: "Update API docs build from v0.3 branch"
8 .github/workflows/v1_changes.md vendored
@@ -1,8 +0,0 @@
With the deprecation of v0 docs, the following files will need to be migrated/supported
in the new docs repo:

- run_notebooks.yml: New repo should run Integration tests on code snippets?
- people.yml: Need to fix and somehow display on the new docs site
  - Subsequently, `.github/actions/people/`
- _test_doc_imports.yml
- check-broken-links.yml
6 .gitignore vendored
@@ -1,5 +1,8 @@
.vs/
.claude/
.idea/
#Emacs backup
*~
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -160,3 +163,6 @@ node_modules

prof
virtualenv/
scratch/

.langgraph_api/
8 .mcp.json Normal file
@@ -0,0 +1,8 @@
{
  "mcpServers": {
    "docs-langchain": {
      "type": "http",
      "url": "https://docs.langchain.com/mcp"
    }
  }
}
.pre-commit-config.yaml
@@ -97,9 +97,3 @@ repos:
        entry: make -C libs/partners/qdrant format lint
        files: ^libs/partners/qdrant/
        pass_filenames: false
      - id: root
        name: format and lint docs, cookbook
        language: system
        entry: make format lint
        files: ^(docs|cookbook)/
        pass_filenames: false
2 .vscode/settings.json vendored
@@ -1,14 +1,12 @@
{
  "python.analysis.include": [
    "libs/**",
    "cookbook/**"
  ],
  "python.analysis.exclude": [
    "**/node_modules",
    "**/__pycache__",
    "**/.pytest_cache",
    "**/.*",
    "_dist/**",
  ],
  "python.analysis.autoImportCompletions": true,
  "python.analysis.typeCheckingMode": "basic",
403 AGENTS.md
@@ -1,253 +1,58 @@
|
||||
# Global Development Guidelines for LangChain Projects
|
||||
# Global development guidelines for the LangChain monorepo
|
||||
|
||||
## Core Development Principles
|
||||
This document provides context to understand the LangChain Python project and assist with development.
|
||||
|
||||
### 1. Maintain Stable Public Interfaces ⚠️ CRITICAL
|
||||
## Project architecture and context
|
||||
|
||||
**Always attempt to preserve function signatures, argument positions, and names for exported/public methods.**
|
||||
### Monorepo structure
|
||||
|
||||
❌ **Bad - Breaking Change:**
|
||||
This is a Python monorepo with multiple independently versioned packages that use `uv`.
|
||||
|
||||
```python
|
||||
def get_user(id, verbose=False): # Changed from `user_id`
|
||||
pass
|
||||
```txt
|
||||
langchain/
|
||||
├── libs/
|
||||
│ ├── core/ # `langchain-core` primitives and base abstractions
|
||||
│ ├── langchain/ # `langchain-classic` (legacy, no new features)
|
||||
│ ├── langchain_v1/ # Actively maintained `langchain` package
|
||||
│ ├── partners/ # Third-party integrations
|
||||
│ │ ├── openai/ # OpenAI models and embeddings
|
||||
│ │ ├── anthropic/ # Anthropic (Claude) integration
|
||||
│ │ ├── ollama/ # Local model support
|
||||
│ │ └── ... (other integrations maintained by the LangChain team)
|
||||
│ ├── text-splitters/ # Document chunking utilities
|
||||
│ ├── standard-tests/ # Shared test suite for integrations
|
||||
│ ├── model-profiles/ # Model configuration profiles
|
||||
│ └── cli/ # Command-line interface tools
|
||||
├── .github/ # CI/CD workflows and templates
|
||||
├── .vscode/ # VSCode IDE standard settings and recommended extensions
|
||||
└── README.md # Information about LangChain
|
||||
```
|
||||
|
||||
✅ **Good - Stable Interface:**
|
||||
- **Core layer** (`langchain-core`): Base abstractions, interfaces, and protocols. Users should not need to know about this layer directly.
|
||||
- **Implementation layer** (`langchain`): Concrete implementations and high-level public utilities
|
||||
- **Integration layer** (`partners/`): Third-party service integrations. Note that this monorepo is not exhaustive of all LangChain integrations; some are maintained in separate repos, such as `langchain-ai/langchain-google` and `langchain-ai/langchain-aws`. Usually these repos are cloned at the same level as this monorepo, so if needed, you can refer to their code directly by navigating to `../langchain-google/` from this monorepo.
|
||||
- **Testing layer** (`standard-tests/`): Standardized integration tests for partner integrations
|
||||
|
||||
```python
|
||||
def get_user(user_id: str, verbose: bool = False) -> User:
|
||||
"""Retrieve user by ID with optional verbose output."""
|
||||
pass
|
||||
```
|
||||
### Development tools & commands**
|
||||
|
||||
**Before making ANY changes to public APIs:**
|
||||
- `uv` – Fast Python package installer and resolver (replaces pip/poetry)
|
||||
- `make` – Task runner for common development commands. Feel free to look at the `Makefile` for available commands and usage patterns.
|
||||
- `ruff` – Fast Python linter and formatter
|
||||
- `mypy` – Static type checking
|
||||
- `pytest` – Testing framework
|
||||
|
||||
- Check if the function/class is exported in `__init__.py`
|
||||
- Look for existing usage patterns in tests and examples
|
||||
- Use keyword-only arguments for new parameters: `*, new_param: str = "default"`
|
||||
- Mark experimental features clearly with docstring warnings (using MkDocs Material admonitions, like `!!! warning`)
|
||||
This monorepo uses `uv` for dependency management. Local development uses editable installs: `[tool.uv.sources]`
|
||||
|
||||
🧠 *Ask yourself:* "Would this change break someone's code if they used it last week?"
|
||||
|
||||
### 2. Code Quality Standards
|
||||
|
||||
**All Python code MUST include type hints and return types.**
|
||||
|
||||
❌ **Bad:**
|
||||
|
||||
```python
|
||||
def p(u, d):
|
||||
return [x for x in u if x not in d]
|
||||
```
|
||||
|
||||
✅ **Good:**
|
||||
|
||||
```python
|
||||
def filter_unknown_users(users: list[str], known_users: set[str]) -> list[str]:
|
||||
"""Filter out users that are not in the known users set.
|
||||
|
||||
Args:
|
||||
users: List of user identifiers to filter.
|
||||
known_users: Set of known/valid user identifiers.
|
||||
|
||||
Returns:
|
||||
List of users that are not in the known_users set.
|
||||
"""
|
||||
return [user for user in users if user not in known_users]
|
||||
```
|
||||
|
||||
**Style Requirements:**
|
||||
|
||||
- Use descriptive, **self-explanatory variable names**. Avoid overly short or cryptic identifiers.
|
||||
- Attempt to break up complex functions (>20 lines) into smaller, focused functions where it makes sense
|
||||
- Avoid unnecessary abstraction or premature optimization
|
||||
- Follow existing patterns in the codebase you're modifying
|
||||
|
||||
### 3. Testing Requirements
|
||||
|
||||
**Every new feature or bugfix MUST be covered by unit tests.**
|
||||
|
||||
**Test Organization:**
|
||||
|
||||
- Unit tests: `tests/unit_tests/` (no network calls allowed)
|
||||
- Integration tests: `tests/integration_tests/` (network calls permitted)
|
||||
- Use `pytest` as the testing framework
|
||||
|
||||
**Test Quality Checklist:**
|
||||
|
||||
- [ ] Tests fail when your new logic is broken
|
||||
- [ ] Happy path is covered
|
||||
- [ ] Edge cases and error conditions are tested
|
||||
- [ ] Use fixtures/mocks for external dependencies
|
||||
- [ ] Tests are deterministic (no flaky tests)
|
||||
|
||||
Checklist questions:
|
||||
|
||||
- [ ] Does the test suite fail if your new logic is broken?
|
||||
- [ ] Are all expected behaviors exercised (happy path, invalid input, etc)?
|
||||
- [ ] Do tests use fixtures or mocks where needed?
|
||||
|
||||
```python
|
||||
def test_filter_unknown_users():
|
||||
"""Test filtering unknown users from a list."""
|
||||
users = ["alice", "bob", "charlie"]
|
||||
known_users = {"alice", "bob"}
|
||||
|
||||
result = filter_unknown_users(users, known_users)
|
||||
|
||||
assert result == ["charlie"]
|
||||
assert len(result) == 1
|
||||
```
|
||||
|
||||
### 4. Security and Risk Assessment
|
||||
|
||||
**Security Checklist:**
|
||||
|
||||
- No `eval()`, `exec()`, or `pickle` on user-controlled input
|
||||
- Proper exception handling (no bare `except:`) and use a `msg` variable for error messages
|
||||
- Remove unreachable/commented code before committing
|
||||
- Race conditions or resource leaks (file handles, sockets, threads).
|
||||
- Ensure proper resource cleanup (file handles, connections)
|
||||
|
||||
❌ **Bad:**
|
||||
|
||||
```python
|
||||
def load_config(path):
|
||||
with open(path) as f:
|
||||
return eval(f.read()) # ⚠️ Never eval config
|
||||
```
|
||||
|
||||
✅ **Good:**
|
||||
|
||||
```python
|
||||
import json
|
||||
|
||||
def load_config(path: str) -> dict:
|
||||
with open(path) as f:
|
||||
return json.load(f)
|
||||
```
|
||||
|
||||
### 5. Documentation Standards
|
||||
|
||||
**Use Google-style docstrings with Args section for all public functions.**
|
||||
|
||||
❌ **Insufficient Documentation:**
|
||||
|
||||
```python
|
||||
def send_email(to, msg):
|
||||
"""Send an email to a recipient."""
|
||||
```
|
||||
|
||||
✅ **Complete Documentation:**
|
||||
|
||||
```python
|
||||
def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
|
||||
"""
|
||||
Send an email to a recipient with specified priority.
|
||||
|
||||
Args:
|
||||
to: The email address of the recipient.
|
||||
msg: The message body to send.
|
||||
priority: Email priority level (``'low'``, ``'normal'``, ``'high'``).
|
||||
|
||||
Returns:
|
||||
True if email was sent successfully, False otherwise.
|
||||
|
||||
Raises:
|
||||
InvalidEmailError: If the email address format is invalid.
|
||||
SMTPConnectionError: If unable to connect to email server.
|
||||
"""
|
||||
```
|
||||
|
||||
**Documentation Guidelines:**
|
||||
|
||||
- Types go in function signatures, NOT in docstrings
|
||||
- Focus on "why" rather than "what" in descriptions
|
||||
- Document all parameters, return values, and exceptions
|
||||
- Keep descriptions concise but clear
|
||||
|
||||
📌 *Tip:* Keep descriptions concise but clear. Only document return values if non-obvious.
|
||||
|
||||
### 6. Architectural Improvements
|
||||
|
||||
**When you encounter code that could be improved, suggest better designs:**
|
||||
|
||||
❌ **Poor Design:**
|
||||
|
||||
```python
|
||||
def process_data(data, db_conn, email_client, logger):
|
||||
# Function doing too many things
|
||||
validated = validate_data(data)
|
||||
result = db_conn.save(validated)
|
||||
email_client.send_notification(result)
|
||||
logger.log(f"Processed {len(data)} items")
|
||||
return result
|
||||
```
|
||||
|
||||
✅ **Better Design:**
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class ProcessingResult:
|
||||
"""Result of data processing operation."""
|
||||
items_processed: int
|
||||
success: bool
|
||||
errors: List[str] = field(default_factory=list)
|
||||
|
||||
class DataProcessor:
|
||||
"""Handles data validation, storage, and notification."""
|
||||
|
||||
def __init__(self, db_conn: Database, email_client: EmailClient):
|
||||
self.db = db_conn
|
||||
self.email = email_client
|
||||
|
||||
def process(self, data: List[dict]) -> ProcessingResult:
|
||||
"""Process and store data with notifications."""
|
||||
validated = self._validate_data(data)
|
||||
result = self.db.save(validated)
|
||||
self._notify_completion(result)
|
||||
return result
|
||||
```
|
||||
|
||||
**Design Improvement Areas:**
|
||||
|
||||
If there's a **cleaner**, **more scalable**, or **simpler** design, highlight it and suggest improvements that would:
|
||||
|
||||
- Reduce code duplication through shared utilities
|
||||
- Make unit testing easier
|
||||
- Improve separation of concerns (single responsibility)
|
||||
- Make unit testing easier through dependency injection
|
||||
- Add clarity without adding complexity
|
||||
- Prefer dataclasses for structured data
|
||||
|
||||
## Development Tools & Commands
|
||||
|
||||
### Package Management
|
||||
|
||||
```bash
|
||||
# Add package
|
||||
uv add package-name
|
||||
|
||||
# Sync project dependencies
|
||||
uv sync
|
||||
uv lock
|
||||
```
|
||||
|
||||
### Testing
|
||||
Each package in `libs/` has its own `pyproject.toml` and `uv.lock`.
|
||||
|
||||
```bash
|
||||
# Run unit tests (no network)
|
||||
make test
|
||||
|
||||
# Don't run integration tests, as API keys must be set
|
||||
|
||||
# Run specific test file
|
||||
uv run --group test pytest tests/unit_tests/test_specific.py
|
||||
```
|
||||
|
||||
### Code Quality
|
||||
|
||||
```bash
|
||||
# Lint code
|
||||
make lint
|
||||
@@ -259,66 +64,118 @@ make format
|
||||
uv run --group lint mypy .
|
||||
```
|
||||
|
||||
### Dependency Management Patterns
|
||||
#### Key config files
|
||||
|
||||
**Local Development Dependencies:**
|
||||
- pyproject.toml: Main workspace configuration with dependency groups
|
||||
- uv.lock: Locked dependencies for reproducible builds
|
||||
- Makefile: Development tasks
|
||||
|
||||
```toml
|
||||
[tool.uv.sources]
|
||||
langchain-core = { path = "../core", editable = true }
|
||||
langchain-tests = { path = "../standard-tests", editable = true }
|
||||
```
|
||||
#### Commit standards
|
||||
|
||||
**For tools, use the `@tool` decorator from `langchain_core.tools`:**
|
||||
Suggest PR titles that follow Conventional Commits format. Refer to .github/workflows/pr_lint for allowed types and scopes.
|
||||
|
||||
```python
|
||||
from langchain_core.tools import tool
|
||||
#### Pull request guidelines
|
||||
|
||||
@tool
|
||||
def search_database(query: str) -> str:
|
||||
"""Search the database for relevant information.
|
||||
- Always add a disclaimer to the PR description mentioning how AI agents are involved with the contribution.
|
||||
- Describe the "why" of the changes, why the proposed solution is the right one. Limit prose.
|
||||
- Highlight areas of the proposed changes that require careful review.
|
||||
|
||||
## Core development principles
|
||||
|
||||
### Maintain stable public interfaces
|
||||
|
||||
CRITICAL: Always attempt to preserve function signatures, argument positions, and names for exported/public methods. Do not make breaking changes.
|
||||
|
||||
**Before making ANY changes to public APIs:**
|
||||
|
||||
- Check if the function/class is exported in `__init__.py`
|
||||
- Look for existing usage patterns in tests and examples
|
||||
- Use keyword-only arguments for new parameters: `*, new_param: str = "default"`
|
||||
- Mark experimental features clearly with docstring warnings (using MkDocs Material admonitions, like `!!! warning`)
|
||||
|
||||
Ask: "Would this change break someone's code if they used it last week?"
|
||||
|
||||
### Code quality standards
|
||||
|
||||
All Python code MUST include type hints and return types.
|
||||
|
||||
```python title="Example"
|
||||
def filter_unknown_users(users: list[str], known_users: set[str]) -> list[str]:
|
||||
"""Single line description of the function.
|
||||
|
||||
Any additional context about the function can go here.
|
||||
|
||||
Args:
|
||||
query: The search query string.
|
||||
users: List of user identifiers to filter.
|
||||
known_users: Set of known/valid user identifiers.
|
||||
|
||||
Returns:
|
||||
List of users that are not in the known_users set.
|
||||
"""
|
||||
# Implementation here
|
||||
return results
|
||||
```
|
||||
|
||||
## Commit Standards
|
||||
- Use descriptive, self-explanatory variable names.
|
||||
- Follow existing patterns in the codebase you're modifying
|
||||
- Attempt to break up complex functions (>20 lines) into smaller, focused functions where it makes sense
|
||||
|
||||
**Use Conventional Commits format for PR titles:**
|
||||
### Testing requirements
|
||||
|
||||
- `feat(core): add multi-tenant support`
|
||||
- `fix(cli): resolve flag parsing error`
|
||||
- `docs: update API usage examples`
|
||||
- `docs(openai): update API usage examples`
|
||||
Every new feature or bugfix MUST be covered by unit tests.
|
||||
|
||||
## Framework-Specific Guidelines
|
||||
- Unit tests: `tests/unit_tests/` (no network calls allowed)
|
||||
- Integration tests: `tests/integration_tests/` (network calls permitted)
|
||||
- We use `pytest` as the testing framework; if in doubt, check other existing tests for examples.
|
||||
- The testing file structure should mirror the source code structure.
|
||||
|
||||
- Follow the existing patterns in `langchain-core` for base abstractions
|
||||
- Use `langchain_core.callbacks` for execution tracking
|
||||
- Implement proper streaming support where applicable
|
||||
- Avoid deprecated components like legacy `LLMChain`
|
||||
**Checklist:**
|
||||
|
||||
### Partner Integrations
|
||||
- [ ] Tests fail when your new logic is broken
|
||||
- [ ] Happy path is covered
|
||||
- [ ] Edge cases and error conditions are tested
|
||||
- [ ] Use fixtures/mocks for external dependencies
|
||||
- [ ] Tests are deterministic (no flaky tests)
|
||||
- [ ] Does the test suite fail if your new logic is broken?
|
||||
|
||||
- Follow the established patterns in existing partner libraries
|
||||
- Implement standard interfaces (`BaseChatModel`, `BaseEmbeddings`, etc.)
|
||||
- Include comprehensive integration tests
|
||||
- Document API key requirements and authentication
|
||||
### Security and risk assessment
|
||||
|
||||
---
|
||||
- No `eval()`, `exec()`, or `pickle` on user-controlled input
|
||||
- Proper exception handling (no bare `except:`) and use a `msg` variable for error messages
|
||||
- Remove unreachable/commented code before committing
|
||||
- Race conditions or resource leaks (file handles, sockets, threads).
|
||||
- Ensure proper resource cleanup (file handles, connections)
|
||||
|
||||
## Quick Reference Checklist
### Documentation standards

Before submitting code changes:
Use Google-style docstrings with Args section for all public functions.

- [ ] **Breaking Changes**: Verified no public API changes
- [ ] **Type Hints**: All functions have complete type annotations
- [ ] **Tests**: New functionality is fully tested
- [ ] **Security**: No dangerous patterns (eval, silent failures, etc.)
- [ ] **Documentation**: Google-style docstrings for public functions
- [ ] **Code Quality**: `make lint` and `make format` pass
- [ ] **Architecture**: Suggested improvements where applicable
- [ ] **Commit Message**: Follows Conventional Commits format
```python title="Example"
def send_email(to: str, msg: str, *, priority: str = "normal") -> bool:
    """Send an email to a recipient with specified priority.

    Any additional context about the function can go here.

    Args:
        to: The email address of the recipient.
        msg: The message body to send.
        priority: Email priority level.

    Returns:
        `True` if email was sent successfully, `False` otherwise.

    Raises:
        InvalidEmailError: If the email address format is invalid.
        SMTPConnectionError: If unable to connect to email server.
    """
```

- Types go in function signatures, NOT in docstrings
- If a default is present, DO NOT repeat it in the docstring unless there is post-processing or it is set conditionally.
- Focus on "why" rather than "what" in descriptions
- Document all parameters, return values, and exceptions
- Keep descriptions concise but clear
- Ensure American English spelling (e.g., "behavior", not "behaviour")

## Additional resources

- **Documentation:** https://docs.langchain.com/oss/python/langchain/overview and source at https://github.com/langchain-ai/docs or `../docs/`. Prefer the local install and use file search tools for best results. If needed, use the docs MCP server as defined in `.mcp.json` for programmatic access.
- **Contributing Guide:** [`.github/CONTRIBUTING.md`](https://docs.langchain.com/oss/python/contributing/overview)

11
MIGRATE.md
@@ -1,11 +0,0 @@
# Migrating

Please see the following guides for migrating LangChain code:

* Migrate to [LangChain v0.3](https://python.langchain.com/docs/versions/v0_3/)
* Migrate to [LangChain v0.2](https://python.langchain.com/docs/versions/v0_2/)
* Migrating from [LangChain 0.0.x Chains](https://python.langchain.com/docs/versions/migrating_chains/)
* Upgrade to [LangGraph Memory](https://python.langchain.com/docs/versions/migrating_memory/)

The [LangChain CLI](https://python.langchain.com/docs/versions/v0_3/#migrate-using-langchain-cli) can help you automatically upgrade your code to use non-deprecated imports.
This will be especially helpful if you're still on either version 0.0.x or 0.1.x of LangChain.

93
README.md
@@ -1,47 +1,43 @@
<p align="center">
<picture>
<source media="(prefers-color-scheme: light)" srcset=".github/images/logo-dark.svg">
<source media="(prefers-color-scheme: dark)" srcset=".github/images/logo-light.svg">
<img alt="LangChain Logo" src=".github/images/logo-dark.svg" width="80%">
</picture>
</p>
<div align="center">
<a href="https://www.langchain.com/">
<picture>
<source media="(prefers-color-scheme: light)" srcset=".github/images/logo-dark.svg">
<source media="(prefers-color-scheme: dark)" srcset=".github/images/logo-light.svg">
<img alt="LangChain Logo" src=".github/images/logo-dark.svg" width="80%">
</picture>
</a>
</div>

<p align="center">
The platform for reliable agents.
</p>
<div align="center">
<h3>The platform for reliable agents.</h3>
</div>

<p align="center">
<a href="https://opensource.org/licenses/MIT" target="_blank">
<img src="https://img.shields.io/pypi/l/langchain-core?style=flat-square" alt="PyPI - License">
</a>
<a href="https://pypistats.org/packages/langchain-core" target="_blank">
<img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads">
</a>
<a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank">
<img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square" alt="Open in Dev Containers">
</a>
<a href="https://codespaces.new/langchain-ai/langchain" target="_blank">
<img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20">
</a>
<a href="https://codspeed.io/langchain-ai/langchain" target="_blank">
<img src="https://img.shields.io/endpoint?url=https://codspeed.io/badge.json" alt="CodSpeed Badge">
</a>
<a href="https://twitter.com/langchainai" target="_blank">
<img src="https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI" alt="Twitter / X">
</a>
</p>
<div align="center">
<a href="https://opensource.org/licenses/MIT" target="_blank"><img src="https://img.shields.io/pypi/l/langchain" alt="PyPI - License"></a>
<a href="https://pypistats.org/packages/langchain" target="_blank"><img src="https://img.shields.io/pepy/dt/langchain" alt="PyPI - Downloads"></a>
<a href="https://pypi.org/project/langchain/#history" target="_blank"><img src="https://img.shields.io/pypi/v/langchain?label=%20" alt="Version"></a>
<a href="https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain" target="_blank"><img src="https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode" alt="Open in Dev Containers"></a>
<a href="https://codespaces.new/langchain-ai/langchain" target="_blank"><img src="https://github.com/codespaces/badge.svg" alt="Open in Github Codespace" title="Open in Github Codespace" width="150" height="20"></a>
<a href="https://codspeed.io/langchain-ai/langchain" target="_blank"><img src="https://img.shields.io/endpoint?url=https://codspeed.io/badge.json" alt="CodSpeed Badge"></a>
<a href="https://twitter.com/langchainai" target="_blank"><img src="https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI" alt="Twitter / X"></a>
</div>

LangChain is a framework for building LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development — all while future-proofing decisions as the underlying technology evolves.
LangChain is a framework for building agents and LLM-powered applications. It helps you chain together interoperable components and third-party integrations to simplify AI application development – all while future-proofing decisions as the underlying technology evolves.

```bash
pip install -U langchain
pip install langchain
```

If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our framework for building controllable agent workflows.

---

**Documentation**: To learn more about LangChain, check out [the docs](https://docs.langchain.com/).
**Documentation**:

If you're looking for more advanced customization or agent orchestration, check out [LangGraph](https://langchain-ai.github.io/langgraph/), our framework for building controllable agent workflows.
- [docs.langchain.com](https://docs.langchain.com/oss/python/langchain/overview) – Comprehensive documentation, including conceptual overviews and guides
- [reference.langchain.com/python](https://reference.langchain.com/python) – API reference docs for LangChain packages

**Discussions**: Visit the [LangChain Forum](https://forum.langchain.com) to connect with the community and share all of your technical questions, ideas, and feedback.

> [!NOTE]
> Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -52,26 +48,27 @@ LangChain helps developers build applications powered by LLMs through a standard

Use LangChain for:

- **Real-time data augmentation**. Easily connect LLMs to diverse data sources and external/internal systems, drawing from LangChain’s vast library of integrations with model providers, tools, vector stores, retrievers, and more.
- **Model interoperability**. Swap models in and out as your engineering team experiments to find the best choice for your application’s needs. As the industry frontier evolves, adapt quickly — LangChain’s abstractions keep you moving without losing momentum.
- **Real-time data augmentation**. Easily connect LLMs to diverse data sources and external/internal systems, drawing from LangChain's vast library of integrations with model providers, tools, vector stores, retrievers, and more.
- **Model interoperability**. Swap models in and out as your engineering team experiments to find the best choice for your application's needs. As the industry frontier evolves, adapt quickly – LangChain's abstractions keep you moving without losing momentum.
- **Rapid prototyping**. Quickly build and iterate on LLM applications with LangChain's modular, component-based architecture. Test different approaches and workflows without rebuilding from scratch, accelerating your development cycle.
- **Production-ready features**. Deploy reliable applications with built-in support for monitoring, evaluation, and debugging through integrations like LangSmith. Scale with confidence using battle-tested patterns and best practices.
- **Vibrant community and ecosystem**. Leverage a rich ecosystem of integrations, templates, and community-contributed components. Benefit from continuous improvements and stay up-to-date with the latest AI developments through an active open-source community.
- **Flexible abstraction layers**. Work at the level of abstraction that suits your needs - from high-level chains for quick starts to low-level components for fine-grained control. LangChain grows with your application's complexity.

## LangChain’s ecosystem
## LangChain ecosystem

While the LangChain framework can be used standalone, it also integrates seamlessly with any LangChain product, giving developers a full suite of tools when building LLM applications.

To improve your LLM application development, pair LangChain with:

- [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows — and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams — and iterate quickly with visual prototyping in [LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
- [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview) – Build agents that can reliably handle complex tasks with LangGraph, our low-level agent orchestration framework. LangGraph offers customizable architecture, long-term memory, and human-in-the-loop workflows – and is trusted in production by companies like LinkedIn, Uber, Klarna, and GitLab.
- [Integrations](https://docs.langchain.com/oss/python/integrations/providers/overview) – List of LangChain integrations, including chat & embedding models, tools & toolkits, and more
- [LangSmith](https://www.langchain.com/langsmith) – Helpful for agent evals and observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain visibility in production, and improve performance over time.
- [LangSmith Deployment](https://docs.langchain.com/langsmith/deployments) – Deploy and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across teams – and iterate quickly with visual prototyping in [LangSmith Studio](https://docs.langchain.com/langsmith/studio).
- [Deep Agents](https://github.com/langchain-ai/deepagents) *(new!)* – Build agents that can plan, use subagents, and leverage file systems for complex tasks

## Additional resources

- [Conceptual Guides](https://docs.langchain.com/oss/python/langchain/overview): Explanations of key concepts behind the LangChain framework.
- [Tutorials](https://docs.langchain.com/oss/python/learn): Simple walkthroughs with guided examples on getting started with LangChain.
- [API Reference](https://reference.langchain.com/python/): Detailed reference on navigating base packages and integrations for LangChain.
- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.
- [API Reference](https://reference.langchain.com/python) – Detailed reference on navigating base packages and integrations for LangChain.
- [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview) – Learn how to contribute to LangChain projects and find good first issues.
- [Code of Conduct](https://github.com/langchain-ai/langchain/blob/master/.github/CODE_OF_CONDUCT.md) – Our community guidelines and standards for participation.

80
SECURITY.md
@@ -1,80 +0,0 @@
# Security Policy

LangChain has a large ecosystem of integrations with various external resources like local and remote file systems, APIs and databases. These integrations allow developers to create versatile applications that combine the power of LLMs with the ability to access, interact with and manipulate external resources.

## Best practices

When building such applications, developers should remember to follow good security practices:

* [**Limit Permissions**](https://en.wikipedia.org/wiki/Principle_of_least_privilege): Scope permissions specifically to the application's need. Granting broad or excessive permissions can introduce significant security vulnerabilities. To avoid such vulnerabilities, consider using read-only credentials, disallowing access to sensitive resources, using sandboxing techniques (such as running inside a container), specifying proxy configurations to control external requests, etc., as appropriate for your application.
* **Anticipate Potential Misuse**: Just as humans can err, so can Large Language Models (LLMs). Always assume that any system access or credentials may be used in any way allowed by the permissions they are assigned. For example, if a pair of database credentials allows deleting data, it's safest to assume that any LLM able to use those credentials may in fact delete data.
* [**Defense in Depth**](https://en.wikipedia.org/wiki/Defense_in_depth_(computing)): No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It's best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use.

Risks of not doing so include, but are not limited to:

* Data corruption or loss.
* Unauthorized access to confidential information.
* Compromised performance or availability of critical resources.

Example scenarios with mitigation strategies:

* A user may ask an agent with access to the file system to delete files that should not be deleted or read the content of files that contain sensitive information. To mitigate, limit the agent to only use a specific directory and only allow it to read or write files that are safe to read or write. Consider further sandboxing the agent by running it in a container.
* A user may ask an agent with write access to an external API to write malicious data to the API, or delete data from that API. To mitigate, give the agent read-only API keys, or limit it to only use endpoints that are already resistant to such misuse.
* A user may ask an agent with access to a database to drop a table or mutate the schema. To mitigate, scope the credentials to only the tables that the agent needs to access and consider issuing READ-ONLY credentials.

If you're building applications that access external resources like file systems, APIs or databases, consider speaking with your company's security team to determine how to best design and secure your applications.

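A minimal sketch of the directory-scoping mitigation described above; the sandbox root and helper are illustrative, not part of LangChain:

```python
from pathlib import Path

SANDBOX_ROOT = Path("/srv/agent-sandbox").resolve()  # hypothetical allow-listed directory


def safe_read(relative_path: str) -> str:
    """Resolve a path against the sandbox root and refuse escapes like '../'."""
    target = (SANDBOX_ROOT / relative_path).resolve()
    if not target.is_relative_to(SANDBOX_ROOT):  # Python 3.9+
        msg = f"Refusing to read outside the sandbox: {target}"
        raise PermissionError(msg)
    return target.read_text()
```
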
## Reporting OSS Vulnerabilities

LangChain is partnered with [huntr by Protect AI](https://huntr.com/) to provide a bounty program for our open source projects.

Please report security vulnerabilities associated with the LangChain open source projects at [huntr](https://huntr.com/bounties/disclose/?target=https%3A%2F%2Fgithub.com%2Flangchain-ai%2Flangchain&validSearch=true).

Before reporting a vulnerability, please review:

1) In-Scope Targets and Out-of-Scope Targets below.
2) The [langchain-ai/langchain](https://docs.langchain.com/oss/python/contributing/code#repository-structure) monorepo structure.
3) The [Best Practices](#best-practices) above to understand what we consider to be a security vulnerability vs. developer responsibility.

### In-Scope Targets

The following packages and repositories are eligible for bug bounties:

* langchain-core
* langchain (see exceptions)
* langchain-community (see exceptions)
* langgraph
* langserve

### Out of Scope Targets

All out of scope targets defined by huntr as well as:

* **langchain-experimental**: This repository is for experimental code and is not eligible for bug bounties (see [package warning](https://pypi.org/project/langchain-experimental/)); bug reports for it will be marked as either interesting or a waste of time and published with no bounty attached.
* **tools**: Tools in either langchain or langchain-community are not eligible for bug bounties. This includes the following directories
  * libs/langchain/langchain/tools
  * libs/community/langchain_community/tools
  * Please review the [Best Practices](#best-practices) for more details, but generally tools interact with the real world. Developers are expected to understand the security implications of their code and are responsible for the security of their tools.
* Code documented with security notices. This will be decided on a case-by-case basis, but likely will not be eligible for a bounty as the code is already documented with guidelines for developers that should be followed for making their application secure.
* Any LangSmith related repositories or APIs (see [Reporting LangSmith Vulnerabilities](#reporting-langsmith-vulnerabilities)).

## Reporting LangSmith Vulnerabilities

Please report security vulnerabilities associated with LangSmith by email to `security@langchain.dev`.

* LangSmith site: [https://smith.langchain.com](https://smith.langchain.com)
* SDK client: [https://github.com/langchain-ai/langsmith-sdk](https://github.com/langchain-ai/langsmith-sdk)

### Other Security Concerns

For any other security concerns, please contact us at `security@langchain.dev`.

@@ -1,932 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "BYejgj8Zf-LG",
"tags": []
},
"source": [
"## Getting started with LangChain and Gemma, running locally or in the Cloud"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2IxjMb9-jIJ8"
},
"source": [
"### Installing dependencies"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"executionInfo": {
"elapsed": 9436,
"status": "ok",
"timestamp": 1708975187360,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "XZaTsXfcheTF",
"outputId": "eb21d603-d824-46c5-f99f-087fb2f618b1",
"tags": []
},
"outputs": [],
"source": [
"!pip install --upgrade langchain langchain-google-vertexai"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IXmAujvC3Kwp"
},
"source": [
"### Running the model"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "CI8Elyc5gBQF"
},
"source": [
"Go to the VertexAI Model Garden on Google Cloud [console](https://pantheon.corp.google.com/vertex-ai/publishers/google/model-garden/335), and deploy the desired version of Gemma to VertexAI. It will take a few minutes, and after the endpoint is ready, you need to copy its number."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "gv1j8FrVftsC"
},
"outputs": [],
"source": [
"# @title Basic parameters\n",
"project: str = \"PUT_YOUR_PROJECT_ID_HERE\" # @param {type:\"string\"}\n",
"endpoint_id: str = \"PUT_YOUR_ENDPOINT_ID_HERE\" # @param {type:\"string\"}\n",
"location: str = \"PUT_YOUR_ENDPOINT_LOCATION_HERE\" # @param {type:\"string\"}"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"executionInfo": {
"elapsed": 3,
"status": "ok",
"timestamp": 1708975440503,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "bhIHsFGYjtFt",
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-27 17:15:10.457149: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
"2024-02-27 17:15:10.508925: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2024-02-27 17:15:10.508957: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2024-02-27 17:15:10.510289: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2024-02-27 17:15:10.518898: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
}
],
"source": [
"from langchain_google_vertexai import (\n",
"    GemmaChatVertexAIModelGarden,\n",
"    GemmaVertexAIModelGarden,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"executionInfo": {
"elapsed": 351,
"status": "ok",
"timestamp": 1708975440852,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "WJv-UVWwh0lk",
"tags": []
},
"outputs": [],
"source": [
"llm = GemmaVertexAIModelGarden(\n",
"    endpoint_id=endpoint_id,\n",
"    project=project,\n",
"    location=location,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"executionInfo": {
"elapsed": 714,
"status": "ok",
"timestamp": 1708975441564,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "6kM7cEFdiN9h",
"outputId": "fb420c56-5614-4745-cda8-0ee450a3e539",
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Prompt:\n",
"What is the meaning of life?\n",
"Output:\n",
" Who am I? Why do I exist? These are questions I have struggled with\n"
]
}
],
"source": [
"output = llm.invoke(\"What is the meaning of life?\")\n",
"print(output)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "zzep9nfmuUcO"
},
"source": [
"We can also use Gemma as a multi-turn chat model:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"executionInfo": {
"elapsed": 964,
"status": "ok",
"timestamp": 1708976298189,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "8tPHoM5XiZOl",
"outputId": "7b8fb652-9aed-47b0-c096-aa1abfc3a2a9",
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content='Prompt:\\n<start_of_turn>user\\nHow much is 2+2?<end_of_turn>\\n<start_of_turn>model\\nOutput:\\n8-years old.<end_of_turn>\\n\\n<start_of'\n",
"content='Prompt:\\n<start_of_turn>user\\nHow much is 2+2?<end_of_turn>\\n<start_of_turn>model\\nPrompt:\\n<start_of_turn>user\\nHow much is 2+2?<end_of_turn>\\n<start_of_turn>model\\nOutput:\\n8-years old.<end_of_turn>\\n\\n<start_of<end_of_turn>\\n<start_of_turn>user\\nHow much is 3+3?<end_of_turn>\\n<start_of_turn>model\\nOutput:\\nOutput:\\n3-years old.<end_of_turn>\\n\\n<'\n"
]
}
],
"source": [
"from langchain_core.messages import HumanMessage\n",
"\n",
"llm = GemmaChatVertexAIModelGarden(\n",
"    endpoint_id=endpoint_id,\n",
"    project=project,\n",
"    location=location,\n",
")\n",
"\n",
"message1 = HumanMessage(content=\"How much is 2+2?\")\n",
"answer1 = llm.invoke([message1])\n",
"print(answer1)\n",
"\n",
"message2 = HumanMessage(content=\"How much is 3+3?\")\n",
"answer2 = llm.invoke([message1, answer1, message2])\n",
"\n",
"print(answer2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can post-process the response to avoid repetitions:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content='Output:\\n<<humming>>: 2+2 = 4.\\n<end'\n",
"content='Output:\\nOutput:\\n<<humming>>: 3+3 = 6.'\n"
]
}
],
"source": [
"answer1 = llm.invoke([message1], parse_response=True)\n",
"print(answer1)\n",
"\n",
"answer2 = llm.invoke([message1, answer1, message2], parse_response=True)\n",
"\n",
"print(answer2)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "VEfjqo7fjARR"
},
"source": [
"## Running Gemma locally from Kaggle"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "gVW8QDzHu7TA"
},
"source": [
"In order to run Gemma locally, you can download it from Kaggle first. To do this, you'll need to log in to the Kaggle platform, create an API key, and download a `kaggle.json` file. Read more about Kaggle auth [here](https://www.kaggle.com/docs/api)."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "S1EsXQ3XvZkQ"
},
"source": [
"### Installation"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"executionInfo": {
"elapsed": 335,
"status": "ok",
"timestamp": 1708976305471,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "p8SMwpKRvbef",
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.10/pty.py:89: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.\n",
"  pid, fd = os.forkpty()\n"
]
}
],
"source": [
"!mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/kaggle.json"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"executionInfo": {
"elapsed": 7802,
"status": "ok",
"timestamp": 1708976363010,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "Yr679aePv9Fq",
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.10/pty.py:89: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.\n",
"  pid, fd = os.forkpty()\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"tensorstore 0.1.54 requires ml-dtypes>=0.3.1, but you have ml-dtypes 0.2.0 which is incompatible.\u001b[0m\u001b[31m\n",
"\u001b[0m"
]
}
],
"source": [
"!pip install keras>=3 keras_nlp"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "E9zn8nYpv3QZ"
},
"source": [
"### Usage"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"executionInfo": {
"elapsed": 8536,
"status": "ok",
"timestamp": 1708976601206,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "0LFRmY8TjCkI",
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-27 16:38:40.797559: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
"2024-02-27 16:38:40.848444: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2024-02-27 16:38:40.848478: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2024-02-27 16:38:40.849728: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2024-02-27 16:38:40.857936: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
}
],
"source": [
"from langchain_google_vertexai import GemmaLocalKaggle"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "v-o7oXVavdMQ"
},
"source": [
"You can specify the keras backend (by default it's `tensorflow`, but you can change it to `jax` or `torch`)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"executionInfo": {
"elapsed": 9,
"status": "ok",
"timestamp": 1708976601206,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "vvTUH8DNj5SF",
"tags": []
},
"outputs": [],
"source": [
"# @title Basic parameters\n",
"keras_backend: str = \"jax\" # @param {type:\"string\"}\n",
"model_name: str = \"gemma_2b_en\" # @param {type:\"string\"}"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"executionInfo": {
"elapsed": 40836,
"status": "ok",
"timestamp": 1708976761257,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "YOmrqxo5kHXK",
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-27 16:23:14.661164: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 20549 MB memory: -> device: 0, name: NVIDIA L4, pci bus id: 0000:00:03.0, compute capability: 8.9\n",
"normalizer.cc(51) LOG(INFO) precompiled_charsmap is empty. use identity normalization.\n"
]
}
],
"source": [
"llm = GemmaLocalKaggle(model_name=model_name, keras_backend=keras_backend)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"id": "Zu6yPDUgkQtQ",
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"W0000 00:00:1709051129.518076  774855 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"What is the meaning of life?\n",
"\n",
"The question is one of the most important questions in the world.\n",
"\n",
"It’s the question that has\n"
]
}
],
"source": [
"output = llm.invoke(\"What is the meaning of life?\", max_tokens=30)\n",
"print(output)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ChatModel"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "MSctpRE4u43N"
},
"source": [
"Same as above, using Gemma locally as a multi-turn chat model. You might need to restart the notebook and clear your GPU memory to avoid OOM errors:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-27 16:58:22.331067: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
"2024-02-27 16:58:22.382948: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2024-02-27 16:58:22.382978: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2024-02-27 16:58:22.384312: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2024-02-27 16:58:22.392767: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
}
],
"source": [
"from langchain_google_vertexai import GemmaChatLocalKaggle"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# @title Basic parameters\n",
"keras_backend: str = \"jax\" # @param {type:\"string\"}\n",
"model_name: str = \"gemma_2b_en\" # @param {type:\"string\"}"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-27 16:58:29.001922: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 20549 MB memory: -> device: 0, name: NVIDIA L4, pci bus id: 0000:00:03.0, compute capability: 8.9\n",
"normalizer.cc(51) LOG(INFO) precompiled_charsmap is empty. use identity normalization.\n"
]
}
],
"source": [
"llm = GemmaChatLocalKaggle(model_name=model_name, keras_backend=keras_backend)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"executionInfo": {
"elapsed": 3,
"status": "aborted",
"timestamp": 1708976382957,
"user": {
"displayName": "",
"userId": ""
},
"user_tz": -60
},
"id": "JrJmvZqwwLqj"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-27 16:58:49.848412: I external/local_xla/xla/service/service.cc:168] XLA service 0x55adc0cf2c10 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n",
"2024-02-27 16:58:49.848458: I external/local_xla/xla/service/service.cc:176]   StreamExecutor device (0): NVIDIA L4, Compute Capability 8.9\n",
"2024-02-27 16:58:50.116614: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
"2024-02-27 16:58:54.389324: I external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:454] Loaded cuDNN version 8900\n",
"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
"I0000 00:00:1709053145.225207  784891 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.\n",
"W0000 00:00:1709053145.284227  784891 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n Tampoco\\nI'm a model.\"\n"
]
}
],
"source": [
"from langchain_core.messages import HumanMessage\n",
"\n",
"message1 = HumanMessage(content=\"Hi! Who are you?\")\n",
"answer1 = llm.invoke([message1], max_tokens=30)\n",
|
||||
"print(answer1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\n<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n Tampoco\\nI'm a model.<end_of_turn>\\n<start_of_turn>user\\nWhat can you help me with?<end_of_turn>\\n<start_of_turn>model\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message2 = HumanMessage(content=\"What can you help me with?\")\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=60)\n",
|
||||
"\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can post-process the response if you want to avoid multi-turn statements:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"I'm a model.\\n Tampoco\\nI'm a model.\"\n",
|
||||
"content='I can help you with your modeling.\\n Tampoco\\nI can'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"answer1 = llm.invoke([message1], max_tokens=30, parse_response=True)\n",
|
||||
"print(answer1)\n",
|
||||
"\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=60, parse_response=True)\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EiZnztso7hyF"
|
||||
},
|
||||
"source": [
|
||||
"## Running Gemma locally from HuggingFace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"id": "qqAqsz5R7nKf",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 17:02:21.832409: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
|
||||
"2024-02-27 17:02:21.883625: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
|
||||
"2024-02-27 17:02:21.883656: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
|
||||
"2024-02-27 17:02:21.884987: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
|
||||
"2024-02-27 17:02:21.893340: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
|
||||
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import GemmaChatLocalHF, GemmaLocalHF"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"id": "tsyntzI08cOr",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Basic parameters\n",
|
||||
"hf_access_token: str = \"PUT_YOUR_TOKEN_HERE\" # @param {type:\"string\"}\n",
|
||||
"model_name: str = \"google/gemma-2b\" # @param {type:\"string\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"id": "JWrqEkOo8sm9",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a0d6de5542254ed1b6d3ba65465e050e",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = GemmaLocalHF(model_name=\"google/gemma-2b\", hf_access_token=hf_access_token)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"id": "VX96Jf4Y84k-",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"What is the meaning of life?\n",
|
||||
"\n",
|
||||
"The question is one of the most important questions in the world.\n",
|
||||
"\n",
|
||||
"It’s the question that has been asked by philosophers, theologians, and scientists for centuries.\n",
|
||||
"\n",
|
||||
"And it’s the question that\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = llm.invoke(\"What is the meaning of life?\", max_tokens=50)\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Same as above, using Gemma locally as a multi-turn chat model. You might need to re-start the notebook and clean your GPU memory in order to avoid OOM errors:"
|
||||
]
|
||||
},
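As in the Kaggle section, you can try releasing the previous model instead of restarting. A sketch assuming the HuggingFace backend sits on a CUDA device (the `torch` call is a standard PyTorch utility, not part of the Gemma wrappers):

```python
# Hedged cleanup sketch for the HuggingFace-backed model.
import gc

import torch

del llm                   # drop the GemmaLocalHF instance from above
gc.collect()              # release Python-side references
torch.cuda.empty_cache()  # hand cached, unused GPU memory back to the driver
```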
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"id": "9x-jmEBg9Mk1"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "c9a0b8e161d74a6faca83b1be96dee27",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = GemmaChatLocalHF(model_name=model_name, hf_access_token=hf_access_token)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"id": "qv_OSaMm9PVy"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n<end_of_turn>\\n<start_of_turn>user\\nWhat do you mean\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"message1 = HumanMessage(content=\"Hi! Who are you?\")\n",
|
||||
"answer1 = llm.invoke([message1], max_tokens=60)\n",
|
||||
"print(answer1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\n<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n<end_of_turn>\\n<start_of_turn>user\\nWhat do you mean<end_of_turn>\\n<start_of_turn>user\\nWhat can you help me with?<end_of_turn>\\n<start_of_turn>model\\nI can help you with anything.\\n<\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message2 = HumanMessage(content=\"What can you help me with?\")\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=140)\n",
|
||||
"\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And the same with posprocessing:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"I'm a model.\\n<end_of_turn>\\n\"\n",
|
||||
"content='I can help you with anything.\\n<end_of_turn>\\n<end_of_turn>\\n'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"answer1 = llm.invoke([message1], max_tokens=60, parse_response=True)\n",
|
||||
"print(answer1)\n",
|
||||
"\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=120, parse_response=True)\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"environment": {
|
||||
"kernel": "python3",
|
||||
"name": ".m116",
|
||||
"type": "gcloud",
|
||||
"uri": "gcr.io/deeplearning-platform-release/:m116"
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,398 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "fc935871-7640-41c6-b798-58514d860fe0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## LLaMA2 chat with SQL\n",
|
||||
"\n",
|
||||
"Open source, local LLMs are great to consider for any application that demands data privacy.\n",
|
||||
"\n",
|
||||
"SQL is one good example. \n",
|
||||
"\n",
|
||||
"This cookbook shows how to perform text-to-SQL using various local versions of LLaMA2 run locally.\n",
|
||||
"\n",
|
||||
"## Packages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "81adcf8b-395a-4f02-8749-ac976942b446",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain replicate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8e13ed66-300b-4a23-b8ac-44df68ee4733",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## LLM\n",
|
||||
"\n",
|
||||
"There are a few ways to access LLaMA2.\n",
|
||||
"\n",
|
||||
"To run locally, we use Ollama.ai. \n",
|
||||
"\n",
|
||||
"See [here](/docs/integrations/chat/ollama) for details on installation and setup.\n",
|
||||
"\n",
|
||||
"Also, see [here](/docs/guides/development/local_llms) for our full guide on local LLMs.\n",
|
||||
" \n",
|
||||
"To use an external API, which is not private, we can use Replicate."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6a75a5c6-34ee-4ab9-a664-d9b432d812ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Init param `input` is deprecated, please use `model_kwargs` instead.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Local\n",
|
||||
"from langchain_ollama import ChatOllama\n",
|
||||
"\n",
|
||||
"llama2_chat = ChatOllama(model=\"llama2:13b-chat\")\n",
|
||||
"llama2_code = ChatOllama(model=\"codellama:7b-instruct\")\n",
|
||||
"\n",
|
||||
"# API\n",
|
||||
"from langchain_community.llms import Replicate\n",
|
||||
"\n",
|
||||
"# REPLICATE_API_TOKEN = getpass()\n",
|
||||
"# os.environ[\"REPLICATE_API_TOKEN\"] = REPLICATE_API_TOKEN\n",
|
||||
"replicate_id = \"meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d\"\n",
|
||||
"llama2_chat_replicate = Replicate(\n",
|
||||
" model=replicate_id, input={\"temperature\": 0.01, \"max_length\": 500, \"top_p\": 1}\n",
|
||||
")"
|
||||
]
|
||||
},
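The stderr output above notes that the `input` init param is deprecated in favor of `model_kwargs`. A sketch of the same setup following that suggestion:

```python
# Same Replicate configuration, passed via model_kwargs as the warning advises.
from langchain_community.llms import Replicate

llama2_chat_replicate = Replicate(
    model=replicate_id,
    model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1},
)
```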
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "ce96f7ea-b3d5-44e1-9fa5-a79e04a9e1fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Simply set the LLM we want to use\n",
|
||||
"llm = llama2_chat"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "80222165-f353-4e35-a123-5f70fd70c6c8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## DB\n",
|
||||
"\n",
|
||||
"Connect to a SQLite DB.\n",
|
||||
"\n",
|
||||
"To create this particular DB, you can use the code and follow the steps shown [here](https://github.com/facebookresearch/llama-recipes/blob/main/demo_apps/StructuredLlama.ipynb)."
|
||||
]
|
||||
},
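If you'd rather not follow the linked steps, a quick way to produce a compatible `nba_roster.db` is to load a roster CSV into SQLite. This is a hedged sketch: the CSV filename and its exact columns (`NAME`, `Team`, `SALARY`, matching the queries later in this notebook) are assumptions, not part of the original recipe:

```python
# Hypothetical one-off script to build nba_roster.db from a roster CSV.
import sqlite3

import pandas as pd

df = pd.read_csv("nba_roster.csv")  # assumed export with NAME, Team, SALARY columns
with sqlite3.connect("nba_roster.db") as conn:
    df.to_sql("nba_roster", conn, if_exists="replace", index=False)
```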
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "025bdd82-3bb1-4948-bc7c-c3ccd94fd05c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.utilities import SQLDatabase\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_uri(\"sqlite:///nba_roster.db\", sample_rows_in_table_info=0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_schema(_):\n",
|
||||
" return db.get_table_info()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def run_query(query):\n",
|
||||
" return db.run(query)"
|
||||
]
|
||||
},
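A quick sanity check of these helpers before wiring them into a chain (assumes the database file exists next to the notebook):

```python
# Hedged smoke test for the helpers defined above.
print(get_schema(None)[:300])                         # peek at the table schema
print(run_query("SELECT COUNT(*) FROM nba_roster;"))  # confirm the table is queryable
```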
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "654b3577-baa2-4e12-a393-f40e5db49ac7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query a SQL Database \n",
|
||||
"\n",
|
||||
"Follow the runnables workflow [here](https://python.langchain.com/docs/expression_language/cookbook/sql_db)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "5a4933ea-d9c0-4b0a-8177-ba4490c6532b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' SELECT \"Team\" FROM nba_roster WHERE \"NAME\" = \\'Klay Thompson\\';'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Prompt\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"# Update the template based on the type of SQL Database like MySQL, Microsoft SQL Server and so on\n",
|
||||
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
|
||||
"{schema}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"SQL Query:\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"Given an input question, convert it to a SQL query. No pre-amble.\"),\n",
|
||||
" (\"human\", template),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Chain to query\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"sql_response = (\n",
|
||||
" RunnablePassthrough.assign(schema=get_schema)\n",
|
||||
" | prompt\n",
|
||||
" | llm.bind(stop=[\"\\nSQLResult:\"])\n",
|
||||
" | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"sql_response.invoke({\"question\": \"What team is Klay Thompson on?\"})"
|
||||
]
|
||||
},
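The generated SQL can be executed directly with the `run_query` helper defined earlier. A small sketch, assuming the model returns a valid SQLite query string:

```python
# Run the model-generated SQL against the database.
query = sql_response.invoke({"question": "What team is Klay Thompson on?"})
print(run_query(query))  # e.g. a single-row result containing the team name
```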
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a0e9e2c8-9b88-4853-ac86-001bc6cc6695",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can review the results:\n",
|
||||
"\n",
|
||||
"* [LangSmith trace](https://smith.langchain.com/public/afa56a06-b4e2-469a-a60f-c1746e75e42b/r) LLaMA2-13 Replicate API\n",
|
||||
"* [LangSmith trace](https://smith.langchain.com/public/2d4ecc72-6b8f-4523-8f0b-ea95c6b54a1d/r) LLaMA2-13 local \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "2a2825e3-c1b6-4f7d-b9c9-d9835de323bb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' Based on the table schema and SQL query, there are 30 unique teams in the NBA.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Chain to answer\n",
|
||||
"template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
|
||||
"{schema}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"SQL Query: {query}\n",
|
||||
"SQL Response: {response}\"\"\"\n",
|
||||
"prompt_response = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"Given an input question and SQL response, convert it to a natural language answer. No pre-amble.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", template),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"full_chain = (\n",
|
||||
" RunnablePassthrough.assign(query=sql_response)\n",
|
||||
" | RunnablePassthrough.assign(\n",
|
||||
" schema=get_schema,\n",
|
||||
" response=lambda x: db.run(x[\"query\"]),\n",
|
||||
" )\n",
|
||||
" | prompt_response\n",
|
||||
" | llm\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"full_chain.invoke({\"question\": \"How many unique teams are there?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ec17b3ee-6618-4681-b6df-089bbb5ffcd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can review the results:\n",
|
||||
"\n",
|
||||
"* [LangSmith trace](https://smith.langchain.com/public/10420721-746a-4806-8ecf-d6dc6399d739/r) LLaMA2-13 Replicate API\n",
|
||||
"* [LangSmith trace](https://smith.langchain.com/public/5265ebab-0a22-4f37-936b-3300f2dfa1c1/r) LLaMA2-13 local "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e85381b-1edc-4bb3-a7bd-2ab23f81e54d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chat with a SQL DB \n",
|
||||
"\n",
|
||||
"Next, we can add memory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "022868f2-128e-42f5-8d90-d3bb2f11d994",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' SELECT \"Team\" FROM nba_roster WHERE \"NAME\" = \\'Klay Thompson\\';'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Prompt\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"\n",
|
||||
"template = \"\"\"Given an input question, convert it to a SQL query. No pre-amble. Based on the table schema below, write a SQL query that would answer the user's question:\n",
|
||||
"{schema}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", template),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"memory = ConversationBufferMemory(return_messages=True)\n",
|
||||
"\n",
|
||||
"# Chain to query with memory\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"\n",
|
||||
"sql_chain = (\n",
|
||||
" RunnablePassthrough.assign(\n",
|
||||
" schema=get_schema,\n",
|
||||
" history=RunnableLambda(lambda x: memory.load_memory_variables(x)[\"history\"]),\n",
|
||||
" )\n",
|
||||
" | prompt\n",
|
||||
" | llm.bind(stop=[\"\\nSQLResult:\"])\n",
|
||||
" | StrOutputParser()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def save(input_output):\n",
|
||||
" output = {\"output\": input_output.pop(\"output\")}\n",
|
||||
" memory.save_context(input_output, output)\n",
|
||||
" return output[\"output\"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"sql_response_memory = RunnablePassthrough.assign(output=sql_chain) | save\n",
|
||||
"sql_response_memory.invoke({\"question\": \"What team is Klay Thompson on?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "800a7a3b-f411-478b-af51-2310cd6e0425",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' Sure! Here\\'s the natural language response based on the given input:\\n\\n\"Klay Thompson\\'s salary is $43,219,440.\"')"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Chain to answer\n",
|
||||
"template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
|
||||
"{schema}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"SQL Query: {query}\n",
|
||||
"SQL Response: {response}\"\"\"\n",
|
||||
"prompt_response = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"Given an input question and SQL response, convert it to a natural language answer. No pre-amble.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", template),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"full_chain = (\n",
|
||||
" RunnablePassthrough.assign(query=sql_response_memory)\n",
|
||||
" | RunnablePassthrough.assign(\n",
|
||||
" schema=get_schema,\n",
|
||||
" response=lambda x: db.run(x[\"query\"]),\n",
|
||||
" )\n",
|
||||
" | prompt_response\n",
|
||||
" | llm\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"full_chain.invoke({\"question\": \"What is his salary?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b77fee61-f4da-4bb1-8285-14101e505518",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here is the [trace](https://smith.langchain.com/public/54794d18-2337-4ce2-8b9f-3d8a2df89e51/r)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
# LangChain cookbook
|
||||
|
||||
Example code for building applications with LangChain, with an emphasis on more applied and end-to-end examples than those in the [main documentation](https://python.langchain.com).
|
||||
|
||||
Notebook | Description
|
||||
:- | :-
|
||||
[agent_fireworks_ai_langchain_mongodb.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/agent_fireworks_ai_langchain_mongodb.ipynb) | Build an AI Agent With Memory Using MongoDB, LangChain and FireWorksAI.
|
||||
[mongodb-langchain-cache-memory.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/mongodb-langchain-cache-memory.ipynb) | Build a RAG Application with Semantic Cache Using MongoDB and LangChain.
|
||||
[LLaMA2_sql_chat.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/LLaMA2_sql_chat.ipynb) | Build a chat application that interacts with a SQL database using an open source llm (llama2), specifically demonstrated on a SQLite database of NBA rosters.
|
||||
[Semi_Structured_RAG.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_Structured_RAG.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data, including text and tables, using unstructured for parsing, multi-vector retriever for storing, and lcel for implementing chains.
|
||||
[Semi_structured_and_multi_moda...](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data and images, using unstructured for parsing, multi-vector retriever for storage and retrieval, and lcel for implementing chains.
|
||||
[Semi_structured_multi_modal_RA...](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data and images, using various tools and methods such as unstructured for parsing, multi-vector retriever for storing, lcel for implementing chains, and open source language models like llama2, llava, and gpt4all.
|
||||
[amazon_personalize_how_to.ipynb](https://github.com/langchain-ai/langchain/blob/master/cookbook/amazon_personalize_how_to.ipynb) | Retrieve personalized recommendations from Amazon Personalize and use custom agents to build generative AI apps.
|
||||
[analyze_document.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/analyze_document.ipynb) | Analyze a single long document.
|
||||
[autogpt/autogpt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/autogpt/autogpt.ipynb) | Implement autogpt, a language model, with langchain primitives such as llms, prompttemplates, vectorstores, embeddings, and tools.
|
||||
[autogpt/marathon_times.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/autogpt/marathon_times.ipynb) | Implement autogpt for finding winning marathon times.
|
||||
[baby_agi.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/baby_agi.ipynb) | Implement babyagi, an ai agent that can generate and execute tasks based on a given objective, with the flexibility to swap out specific vectorstores/model providers.
|
||||
[baby_agi_with_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/baby_agi_with_agent.ipynb) | Swap out the execution chain in the babyagi notebook with an agent that has access to tools, aiming to obtain more reliable information.
|
||||
[camel_role_playing.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/camel_role_playing.ipynb) | Implement the camel framework for creating autonomous cooperative agents in large-scale language models, using role-playing and inception prompting to guide chat agents towards task completion.
|
||||
[causal_program_aided_language_...](https://github.com/langchain-ai/langchain/tree/master/cookbook/causal_program_aided_language_model.ipynb) | Implement the causal program-aided language (cpal) chain, which improves upon the program-aided language (pal) by incorporating causal structure to prevent hallucination in language models, particularly when dealing with complex narratives and math problems with nested dependencies.
|
||||
[code-analysis-deeplake.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/code-analysis-deeplake.ipynb) | Analyze the LangChain code base itself with the help of gpt and activeloop's deep lake.
|
||||
[custom_agent_with_plugin_retri...](https://github.com/langchain-ai/langchain/tree/master/cookbook/custom_agent_with_plugin_retrieval.ipynb) | Build a custom agent that can interact with ai plugins by retrieving tools and creating natural language wrappers around openapi endpoints.
|
||||
[custom_agent_with_plugin_retri...](https://github.com/langchain-ai/langchain/tree/master/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb) | Build a custom agent with plugin retrieval functionality, utilizing ai plugins from the `plugnplai` directory.
|
||||
[deeplake_semantic_search_over_...](https://github.com/langchain-ai/langchain/tree/master/cookbook/deeplake_semantic_search_over_chat.ipynb) | Perform semantic search and question-answering over a group chat using activeloop's deep lake with gpt4.
|
||||
[elasticsearch_db_qa.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/elasticsearch_db_qa.ipynb) | Interact with elasticsearch analytics databases in natural language and build search queries via the elasticsearch dsl API.
|
||||
[extraction_openai_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/extraction_openai_tools.ipynb) | Structured Data Extraction with OpenAI Tools
|
||||
[forward_looking_retrieval_augm...](https://github.com/langchain-ai/langchain/tree/master/cookbook/forward_looking_retrieval_augmented_generation.ipynb) | Implement the forward-looking active retrieval augmented generation (flare) method, which generates answers to questions, identifies uncertain tokens, generates hypothetical questions based on these tokens, and retrieves relevant documents to continue generating the answer.
|
||||
[generative_agents_interactive_...](https://github.com/langchain-ai/langchain/tree/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) | Implement a generative agent that simulates human behavior, based on a research paper, using a time-weighted memory object backed by a langchain retriever.
|
||||
[gymnasium_agent_simulation.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/gymnasium_agent_simulation.ipynb) | Create a simple agent-environment interaction loop in simulated environments like text-based games with gymnasium.
|
||||
[hugginggpt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/hugginggpt.ipynb) | Implement hugginggpt, a system that connects language models like chatgpt with the machine learning community via hugging face.
|
||||
[hypothetical_document_embeddin...](https://github.com/langchain-ai/langchain/tree/master/cookbook/hypothetical_document_embeddings.ipynb) | Improve document indexing with hypothetical document embeddings (hyde), an embedding technique that generates and embeds hypothetical answers to queries.
|
||||
[learned_prompt_optimization.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/learned_prompt_optimization.ipynb) | Automatically enhance language model prompts by injecting specific terms using reinforcement learning, which can be used to personalize responses based on user preferences.
|
||||
[llm_bash.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_bash.ipynb) | Perform simple filesystem commands using large language models (llms) and a bash process.
|
||||
[llm_checker.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_checker.ipynb) | Create a self-checking chain using the llmcheckerchain function.
|
||||
[llm_math.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_math.ipynb) | Solve complex word math problems using language models and python repls.
|
||||
[llm_summarization_checker.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_summarization_checker.ipynb) | Check the accuracy of text summaries, with the option to run the checker multiple times for improved results.
|
||||
[llm_symbolic_math.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/llm_symbolic_math.ipynb) | Solve algebraic equations with the help of llms (large language models) and sympy, a python library for symbolic mathematics.
|
||||
[meta_prompt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/meta_prompt.ipynb) | Implement the meta-prompt concept, which is a method for building self-improving agents that reflect on their own performance and modify their instructions accordingly.
|
||||
[multi_modal_output_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_modal_output_agent.ipynb) | Generate multi-modal outputs, specifically images and text.
|
||||
[multi_modal_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_modal_RAG_vdms.ipynb) | Perform retrieval-augmented generation (rag) on documents including text and images, using unstructured for parsing, Intel's Visual Data Management System (VDMS) as the vectorstore, and chains.
|
||||
[multi_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multi_player_dnd.ipynb) | Simulate multi-player dungeons & dragons games, with a custom function determining the speaking schedule of the agents.
|
||||
[multiagent_authoritarian.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multiagent_authoritarian.ipynb) | Implement a multi-agent simulation where a privileged agent controls the conversation, including deciding who speaks and when the conversation ends, in the context of a simulated news network.
|
||||
[multiagent_bidding.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/multiagent_bidding.ipynb) | Implement a multi-agent simulation where agents bid to speak, with the highest bidder speaking next, demonstrated through a fictitious presidential debate example.
|
||||
[myscale_vector_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/myscale_vector_sql.ipynb) | Access and interact with the myscale integrated vector database, which can enhance the performance of language model (llm) applications.
|
||||
[openai_functions_retrieval_qa....](https://github.com/langchain-ai/langchain/tree/master/cookbook/openai_functions_retrieval_qa.ipynb) | Structure response output in a question-answering system by incorporating openai functions into a retrieval pipeline.
|
||||
[openai_v1_cookbook.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/openai_v1_cookbook.ipynb) | Explore new functionality released alongside the V1 release of the OpenAI Python library.
|
||||
[petting_zoo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/petting_zoo.ipynb) | Create multi-agent simulations with simulated environments using the petting zoo library.
|
||||
[plan_and_execute_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/plan_and_execute_agent.ipynb) | Create plan-and-execute agents that accomplish objectives by planning tasks with a language model (llm) and executing them with a separate agent.
|
||||
[press_releases.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/press_releases.ipynb) | Retrieve and query company press release data powered by [Kay.ai](https://kay.ai).
|
||||
[program_aided_language_model.i...](https://github.com/langchain-ai/langchain/tree/master/cookbook/program_aided_language_model.ipynb) | Implement program-aided language models as described in the provided research paper.
|
||||
[qa_citations.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/qa_citations.ipynb) | Different ways to get a model to cite its sources.
|
||||
[rag_upstage_document_parse_groundedness_check.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag_upstage_document_parse_groundedness_check.ipynb) | End-to-end RAG example using Upstage Document Parse and Groundedness Check.
|
||||
[retrieval_in_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/retrieval_in_sql.ipynb) | Perform retrieval-augmented-generation (rag) on a PostgreSQL database using pgvector.
|
||||
[sales_agent_with_context.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/sales_agent_with_context.ipynb) | Implement a context-aware ai sales agent, salesgpt, that can have natural sales conversations, interact with other systems, and use a product knowledge base to discuss a company's offerings.
|
||||
[self_query_hotel_search.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/self_query_hotel_search.ipynb) | Build a hotel room search feature with self-querying retrieval, using a specific hotel recommendation dataset.
|
||||
[smart_llm.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/smart_llm.ipynb) | Implement a smartllmchain, a self-critique chain that generates multiple output proposals, critiques them to find the best one, and then improves upon it to produce a final output.
|
||||
[tree_of_thought.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/tree_of_thought.ipynb) | Query a large language model using the tree of thought technique.
|
||||
[twitter-the-algorithm-analysis...](https://github.com/langchain-ai/langchain/tree/master/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb) | Analyze the source code of the Twitter algorithm with the help of gpt4 and activeloop's deep lake.
|
||||
[two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
|
||||
[two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
|
||||
[wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
|
||||
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
|
||||
[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on an Intel Xeon CPU. We show an example of applying RAG to a Llama 2 model, enabling it to answer queries related to Intel's Q1 2024 earnings release.
|
||||
[visual_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/visual_RAG_vdms.ipynb) | Performs Visual Retrieval-Augmented-Generation (RAG) using videos and scene descriptions generated by open source models.
|
||||
[contextual_rag.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/contextual_rag.ipynb) | Performs contextual retrieval-augmented generation (RAG) prepending chunk-specific explanatory context to each chunk before embedding.
|
||||
[rag-agents-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/local_rag_agents_intel_cpu.ipynb) | Build a RAG agent locally with open source models that routes questions through one of two paths to find answers. The agent generates answers based on documents retrieved from either the vector database or retrieved from web search. If the vector database lacks relevant information, the agent opts for web search. Open-source models for LLM and embeddings are used locally on an Intel Xeon CPU to execute this pipeline.
|
||||
[rag_mlflow_tracking_evaluation.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag_mlflow_tracking_evaluation.ipynb) | Guide on how to create a RAG pipeline and track + evaluate it with MLflow.
|
||||
@@ -1,557 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68b24990",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Combine agents and vector stores\n",
|
||||
"\n",
|
||||
"This notebook covers how to combine agents and vector stores. The use case for this is that you've ingested your data into a vector store and want to interact with it in an agentic manner.\n",
|
||||
"\n",
|
||||
"The recommended method for doing so is to create a `RetrievalQA` and then use that as a tool in the overall agent. Let's take a look at doing this below. You can do this with multiple different vector DBs, and use the agent as a way to route between them. There are two different ways of doing this - you can either let the agent use the vector stores as normal tools, or you can set `return_direct=True` to really just use the agent as a router."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9b22020a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the vector store"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e8d63d14-138d-4aa5-a741-7fd3537d00aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2e87c10a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "0b7b772b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pathlib import Path\n",
|
||||
"\n",
|
||||
"relevant_parts = []\n",
|
||||
"for p in Path(\".\").absolute().parts:\n",
|
||||
" relevant_parts.append(p)\n",
|
||||
" if relevant_parts[-3:] == [\"langchain\", \"docs\", \"modules\"]:\n",
|
||||
" break\n",
|
||||
"doc_path = str(Path(*relevant_parts) / \"state_of_the_union.txt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f2675861",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"\n",
|
||||
"loader = TextLoader(doc_path)\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docsearch = Chroma.from_documents(texts, embeddings, collection_name=\"state-of-union\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "bc5403d4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"state_of_union = RetrievalQA.from_chain_type(\n",
|
||||
" llm=llm, chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "1431cded",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import WebBaseLoader"
|
||||
]
|
||||
},
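The `USER_AGENT` warning above can be silenced by setting the environment variable before the import; the value below is a hypothetical identifier:

```python
# Optional: identify your requests, as the warning suggests.
import os

os.environ["USER_AGENT"] = "agents-and-vectorstores-notebook/0.1"  # hypothetical UA
```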
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "915d3ff3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = WebBaseLoader(\"https://beta.ruff.rs/docs/faq/\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "96a2edf8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Created a chunk of size 2122, which is longer than the specified 1000\n",
|
||||
"Created a chunk of size 3187, which is longer than the specified 1000\n",
|
||||
"Created a chunk of size 1017, which is longer than the specified 1000\n",
|
||||
"Created a chunk of size 1049, which is longer than the specified 1000\n",
|
||||
"Created a chunk of size 1256, which is longer than the specified 1000\n",
|
||||
"Created a chunk of size 2321, which is longer than the specified 1000\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"ruff_texts = text_splitter.split_documents(docs)\n",
|
||||
"ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name=\"ruff\")\n",
|
||||
"ruff = RetrievalQA.from_chain_type(\n",
|
||||
" llm=llm, chain_type=\"stuff\", retriever=ruff_db.as_retriever()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c0a6c031",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "eb142786",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import things that are needed generically\n",
|
||||
"from langchain.agents import Tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "850bc4e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"state_of_union_qa_system\",\n",
|
||||
" func=state_of_union.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"ruff_qa_system\",\n",
|
||||
" func=ruff.run,\n",
|
||||
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "70c461d8-aaca-4f2a-9a93-bf35841cc615",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent = create_react_agent(\"openai:gpt-4.1-mini\", tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "a6d2b911-3044-4430-a35b-75832bb45334",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What did biden say about ketanji brown jackson in the state of the union address?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" state_of_union_qa_system (call_26QlRdsptjEJJZjFsAUjEbaH)\n",
|
||||
" Call ID: call_26QlRdsptjEJJZjFsAUjEbaH\n",
|
||||
" Args:\n",
|
||||
" __arg1: What did Biden say about Ketanji Brown Jackson in the state of the union address?\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: state_of_union_qa_system\n",
|
||||
"\n",
|
||||
" Biden said that he nominated Ketanji Brown Jackson for the United States Supreme Court and praised her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"In the State of the Union address, Biden said that he nominated Ketanji Brown Jackson for the United States Supreme Court and praised her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"input_message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"What did biden say about ketanji brown jackson in the state of the union address?\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"for step in agent.stream(\n",
|
||||
" {\"messages\": [input_message]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "e836b4cd-abf7-49eb-be0e-b9ad501213f3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"Why use ruff over flake8?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" ruff_qa_system (call_KqDoWeO9bo9OAXdxOsCb6msC)\n",
|
||||
" Call ID: call_KqDoWeO9bo9OAXdxOsCb6msC\n",
|
||||
" Args:\n",
|
||||
" __arg1: Why use ruff over flake8?\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: ruff_qa_system\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"There are a few reasons why someone might choose to use Ruff over Flake8:\n",
|
||||
"\n",
|
||||
"1. Larger rule set: Ruff implements over 800 rules, while Flake8 only implements around 200. This means that Ruff can catch more potential issues in your code.\n",
|
||||
"\n",
|
||||
"2. Better compatibility with other tools: Ruff is designed to work well with other tools like Black, isort, and type checkers like Mypy. This means that you can use Ruff alongside these tools to get more comprehensive feedback on your code.\n",
|
||||
"\n",
|
||||
"3. Automatic fixing of lint violations: Unlike Flake8, Ruff is capable of automatically fixing its own lint violations. This can save you time and effort when fixing issues in your code.\n",
|
||||
"\n",
|
||||
"4. Native implementation of popular Flake8 plugins: Ruff re-implements some of the most popular Flake8 plugins natively, which means you don't have to install and configure multiple plugins to get the same functionality.\n",
|
||||
"\n",
|
||||
"Overall, Ruff offers a more comprehensive and user-friendly experience compared to Flake8, making it a popular choice for many developers.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"You might choose to use Ruff over Flake8 for several reasons:\n",
|
||||
"\n",
|
||||
"1. Ruff has a much larger rule set, implementing over 800 rules compared to Flake8's roughly 200, so it can catch more potential issues.\n",
|
||||
"2. Ruff is designed to work better with other tools like Black, isort, and type checkers like Mypy, providing more comprehensive code feedback.\n",
|
||||
"3. Ruff can automatically fix its own lint violations, which Flake8 cannot, saving time and effort.\n",
|
||||
"4. Ruff natively implements some popular Flake8 plugins, so you don't need to install and configure multiple plugins separately.\n",
|
||||
"\n",
|
||||
"Overall, Ruff offers a more comprehensive and user-friendly experience compared to Flake8.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"input_message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"Why use ruff over flake8?\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"for step in agent.stream(\n",
|
||||
" {\"messages\": [input_message]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "787a9b5e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use the Agent solely as a router"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9161ba91",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also set `return_direct=True` if you intend to use the agent as a router and just want to directly return the result of the RetrievalQAChain.\n",
|
||||
"\n",
|
||||
"Notice that in the above examples the agent did some extra work after querying the RetrievalQAChain. You can avoid that and just return the result directly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "f59b377e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"state_of_union_qa_system\",\n",
|
||||
" func=state_of_union.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
|
||||
" return_direct=True,\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"ruff_qa_system\",\n",
|
||||
" func=ruff.run,\n",
|
||||
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
|
||||
" return_direct=True,\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "06f69c0f-c83d-4b7f-a1c8-7614aced3bae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent = create_react_agent(\"openai:gpt-4.1-mini\", tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "a6b38c12-ac25-43c0-b9c2-2b1985ab4825",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What did biden say about ketanji brown jackson in the state of the union address?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" state_of_union_qa_system (call_yjxh11OnZiauoyTAn9npWdxj)\n",
|
||||
" Call ID: call_yjxh11OnZiauoyTAn9npWdxj\n",
|
||||
" Args:\n",
|
||||
" __arg1: What did Biden say about Ketanji Brown Jackson in the state of the union address?\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: state_of_union_qa_system\n",
|
||||
"\n",
|
||||
" Biden said that he nominated Ketanji Brown Jackson for the United States Supreme Court and praised her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"input_message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"What did biden say about ketanji brown jackson in the state of the union address?\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"for step in agent.stream(\n",
|
||||
" {\"messages\": [input_message]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "88f08d86-7972-4148-8128-3ac8898ad68a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"Why use ruff over flake8?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" ruff_qa_system (call_GiWWfwF6wbbRFQrHlHbhRtGW)\n",
|
||||
" Call ID: call_GiWWfwF6wbbRFQrHlHbhRtGW\n",
|
||||
" Args:\n",
|
||||
" __arg1: What are the advantages of using ruff over flake8 for Python linting?\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: ruff_qa_system\n",
|
||||
"\n",
|
||||
" Ruff has a larger rule set, supports automatic fixing of lint violations, and does not require the installation of additional plugins. It also has better compatibility with Black and can be used alongside a type checker for more comprehensive code analysis.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"input_message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"Why use ruff over flake8?\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"for step in agent.stream(\n",
|
||||
" {\"messages\": [input_message]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
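Because both tools above are registered with `return_direct=True`, the run ends as soon as a tool responds: the last streamed message is the tool's output, with no follow-up model turn. A minimal sketch of checking that, assuming the same `agent` and `input_message` as above (the `ToolMessage` expectation is an assumption about the prebuilt agent's behavior, not something this notebook confirms):

```python
# Hedged sketch: with return_direct=True, the final message in the state
# should be the tool's response rather than an extra AI summary.
final_state = agent.invoke({"messages": [input_message]})
last_message = final_state["messages"][-1]
print(type(last_message).__name__)  # expected: ToolMessage (an assumption)
print(last_message.content)
```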
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49a0cbbe",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-Hop vector store reasoning\n",
|
||||
"\n",
|
||||
"Because vector stores are easily usable as tools in agents, it is easy to use answer multi-hop questions that depend on vector stores using the existing agent framework."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "d397a233",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"state_of_union_qa_system\",\n",
|
||||
" func=state_of_union.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"ruff_qa_system\",\n",
|
||||
" func=ruff.run,\n",
|
||||
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "41743f29-150d-40ba-aa8e-3a63c32216aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent = create_react_agent(\"openai:gpt-4.1-mini\", tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "e20e81dd-284a-4d07-9160-63a84b65cba8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" ruff_qa_system (call_VOnxiOEehauQyVOTjDJkR5L2)\n",
|
||||
" Call ID: call_VOnxiOEehauQyVOTjDJkR5L2\n",
|
||||
" Args:\n",
|
||||
" __arg1: What tool does ruff use to run over Jupyter Notebooks?\n",
|
||||
" state_of_union_qa_system (call_AbSsXAxwe4JtCRhga926SxOZ)\n",
|
||||
" Call ID: call_AbSsXAxwe4JtCRhga926SxOZ\n",
|
||||
" Args:\n",
|
||||
" __arg1: Did the president mention the tool that ruff uses to run over Jupyter Notebooks in the state of the union?\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: state_of_union_qa_system\n",
|
||||
"\n",
|
||||
" No, the president did not mention the tool that ruff uses to run over Jupyter Notebooks in the state of the union.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Ruff does not support source.organizeImports and source.fixAll code actions in Jupyter Notebooks. Additionally, the president did not mention the tool that ruff uses to run over Jupyter Notebooks in the state of the union.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"input_message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": \"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"for step in agent.stream(\n",
|
||||
" {\"messages\": [input_message]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
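Each AI turn records its tool calls on the message objects, so after a multi-hop run you can audit which hops the agent took. A small sketch, assuming the `agent` and `input_message` defined above:

```python
# Sketch: list every tool call the agent made during the run.
final_state = agent.invoke({"messages": [input_message]})
for message in final_state["messages"]:
    for tool_call in getattr(message, "tool_calls", []) or []:
        print(tool_call["name"], "->", tool_call["args"])
```

Note that in the run above the agent issued both tool calls in a single turn, so the second question was phrased before the first answer was known; prompting the model to call the tools sequentially can produce better-grounded multi-hop answers.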
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b3b857d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,200 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-airbyte langchain_chroma"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"GITHUB_TOKEN = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_airbyte import AirbyteLoader\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"loader = AirbyteLoader(\n",
|
||||
" source=\"source-github\",\n",
|
||||
" stream=\"pull_requests\",\n",
|
||||
" config={\n",
|
||||
" \"credentials\": {\"personal_access_token\": GITHUB_TOKEN},\n",
|
||||
" \"repositories\": [\"langchain-ai/langchain\"],\n",
|
||||
" },\n",
|
||||
" template=PromptTemplate.from_template(\n",
|
||||
" \"\"\"# {title}\n",
|
||||
"by {user[login]}\n",
|
||||
"\n",
|
||||
"{body}\"\"\"\n",
|
||||
" ),\n",
|
||||
" include_metadata=False,\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"# Updated partners/ibm README\n",
|
||||
"by williamdevena\n",
|
||||
"\n",
|
||||
"## PR title\n",
|
||||
"partners: changed the README file for the IBM Watson AI integration in the libs/partners/ibm folder.\n",
|
||||
"\n",
|
||||
"## PR message\n",
|
||||
"Description: Changed the README file of partners/ibm following the docs on https://python.langchain.com/docs/integrations/llms/ibm_watsonx\n",
|
||||
"\n",
|
||||
"The README includes:\n",
|
||||
"\n",
|
||||
"- Brief description\n",
|
||||
"- Installation\n",
|
||||
"- Setting-up instructions (API key, project id, ...)\n",
|
||||
"- Basic usage:\n",
|
||||
" - Loading the model\n",
|
||||
" - Direct inference\n",
|
||||
" - Chain invoking\n",
|
||||
" - Streaming the model output\n",
|
||||
" \n",
|
||||
"Issue: https://github.com/langchain-ai/langchain/issues/17545\n",
|
||||
"\n",
|
||||
"Dependencies: None\n",
|
||||
"\n",
|
||||
"Twitter handle: None\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[-2].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"10283"
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tiktoken\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"enc = tiktoken.get_encoding(\"cl100k_base\")\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embedding=OpenAIEmbeddings(\n",
|
||||
" disallowed_special=(enc.special_tokens_set - {\"<|endofprompt|>\"})\n",
|
||||
" ),\n",
|
||||
")"
|
||||
]
|
||||
},
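The `enc` encoding is used above to relax `disallowed_special` so that PR bodies containing the literal `<|endofprompt|>` marker don't make the tokenizer raise. It can also be used to size the embedding job up front; a rough sketch, assuming `docs` and `enc` from the cell above:

```python
# Rough sketch: estimate the total token count before embedding ~10k PR
# documents. disallowed_special=() treats special tokens as ordinary text.
total_tokens = sum(
    len(enc.encode(doc.page_content, disallowed_special=()))
    for doc in docs
)
print(f"~{total_tokens} tokens to embed")
```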
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
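The default retriever returns four documents per query, as in the output below; `search_kwargs` adjusts that. A small sketch (the name `top2_retriever` is illustrative):

```python
# Sketch: a retriever that returns only the top 2 matches per query.
top2_retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
```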
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='# Updated partners/ibm README\\nby williamdevena\\n\\n## PR title\\r\\npartners: changed the README file for the IBM Watson AI integration in the libs/partners/ibm folder.\\r\\n\\r\\n## PR message\\r\\nDescription: Changed the README file of partners/ibm following the docs on https://python.langchain.com/docs/integrations/llms/ibm_watsonx\\r\\n\\r\\nThe README includes:\\r\\n\\r\\n- Brief description\\r\\n- Installation\\r\\n- Setting-up instructions (API key, project id, ...)\\r\\n- Basic usage:\\r\\n - Loading the model\\r\\n - Direct inference\\r\\n - Chain invoking\\r\\n - Streaming the model output\\r\\n \\r\\nIssue: https://github.com/langchain-ai/langchain/issues/17545\\r\\n\\r\\nDependencies: None\\r\\n\\r\\nTwitter handle: None'),\n",
|
||||
" Document(page_content='# Updated partners/ibm README\\nby williamdevena\\n\\n## PR title\\r\\npartners: changed the README file for the IBM Watson AI integration in the `libs/partners/ibm` folder. \\r\\n\\r\\n\\r\\n\\r\\n## PR message\\r\\n- **Description:** Changed the README file of partners/ibm following the docs on https://python.langchain.com/docs/integrations/llms/ibm_watsonx\\r\\n\\r\\n The README includes:\\r\\n - Brief description\\r\\n - Installation\\r\\n - Setting-up instructions (API key, project id, ...)\\r\\n - Basic usage:\\r\\n - Loading the model\\r\\n - Direct inference\\r\\n - Chain invoking\\r\\n - Streaming the model output\\r\\n\\r\\n\\r\\n- **Issue:** #17545\\r\\n- **Dependencies:** None\\r\\n- **Twitter handle:** None'),\n",
|
||||
" Document(page_content='# IBM: added partners package `langchain_ibm`, added llm\\nby MateuszOssGit\\n\\n - **Description:** Added `langchain_ibm` as an langchain partners package of IBM [watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM provider (`WatsonxLLM`)\\r\\n - **Dependencies:** [ibm-watsonx-ai](https://pypi.org/project/ibm-watsonx-ai/),\\r\\n - **Tag maintainer:** : \\r\\n\\r\\nPlease make sure your PR is passing linting and testing before submitting. Run `make format`, `make lint` and `make test` to check this locally. ✅'),\n",
|
||||
" Document(page_content='# Add WatsonX support\\nby baptistebignaud\\n\\nIt is a connector to use a LLM from WatsonX.\\r\\nIt requires python SDK \"ibm-generative-ai\"\\r\\n\\r\\n(It might not be perfect since it is my first PR on a public repository 😄)')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.invoke(\"pull requests related to IBM\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,284 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Amazon Personalize\n",
|
||||
"\n",
|
||||
"[Amazon Personalize](https://docs.aws.amazon.com/personalize/latest/dg/what-is-personalize.html) is a fully managed machine learning service that uses your data to generate item recommendations for your users. It can also generate user segments based on the users' affinity for certain items or item metadata.\n",
|
||||
"\n",
|
||||
"This notebook goes through how to use Amazon Personalize Chain. You need a Amazon Personalize campaign_arn or a recommender_arn before you get started with the below notebook.\n",
|
||||
"\n",
|
||||
"Following is a [tutorial](https://github.com/aws-samples/retail-demo-store/blob/master/workshop/1-Personalization/Lab-1-Introduction-and-data-preparation.ipynb) to setup a campaign_arn/recommender_arn on Amazon Personalize. Once the campaign_arn/recommender_arn is setup, you can use it in the langchain ecosystem. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Install Dependencies"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install boto3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Sample Use-cases"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.1 [Use-case-1] Setup Amazon Personalize Client and retrieve recommendations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.recommenders import AmazonPersonalize\n",
|
||||
"\n",
|
||||
"recommender_arn = \"<insert_arn>\"\n",
|
||||
"\n",
|
||||
"client = AmazonPersonalize(\n",
|
||||
" credentials_profile_name=\"default\",\n",
|
||||
" region_name=\"us-west-2\",\n",
|
||||
" recommender_arn=recommender_arn,\n",
|
||||
")\n",
|
||||
"client.get_recommendations(user_id=\"1\")"
|
||||
]
|
||||
},
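The introduction notes that either a campaign or a recommender can back the client. A sketch of the campaign variant, assuming `AmazonPersonalize` accepts a `campaign_arn` parameter mirroring `recommender_arn` (an assumption based on the notebook's description, not verified against the class):

```python
# Hedged sketch: the same client setup backed by a campaign instead of a
# recommender. The campaign_arn parameter is an assumption.
campaign_arn = "<insert_arn>"

client = AmazonPersonalize(
    credentials_profile_name="default",
    region_name="us-west-2",
    campaign_arn=campaign_arn,
)
client.get_recommendations(user_id="1")
```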
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.2 [Use-case-2] Invoke Personalize Chain for summarizing results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms.bedrock import Bedrock\n",
|
||||
"from langchain_experimental.recommenders import AmazonPersonalizeChain\n",
|
||||
"\n",
|
||||
"bedrock_llm = Bedrock(model_id=\"anthropic.claude-v2\", region_name=\"us-west-2\")\n",
|
||||
"\n",
|
||||
"# Create personalize chain\n",
|
||||
"# Use return_direct=True if you do not want summary\n",
|
||||
"chain = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=False\n",
|
||||
")\n",
|
||||
"response = chain({\"user_id\": \"1\"})\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.3 [Use-Case-3] Invoke Amazon Personalize Chain using your own prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT_QUERY = \"\"\"\n",
|
||||
"You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week, \n",
|
||||
" given the movie and user information below. Your email will leverage the power of storytelling and persuasive language. \n",
|
||||
" The movies to recommend and their information is contained in the <movie> tag. \n",
|
||||
" All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them. \n",
|
||||
" Put the email between <email> tags.\n",
|
||||
"\n",
|
||||
" <movie>\n",
|
||||
" {result} \n",
|
||||
" </movie>\n",
|
||||
"\n",
|
||||
" Assistant:\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT = PromptTemplate(input_variables=[\"result\"], template=RANDOM_PROMPT_QUERY)\n",
|
||||
"\n",
|
||||
"chain = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=False, prompt_template=RANDOM_PROMPT\n",
|
||||
")\n",
|
||||
"chain.run({\"user_id\": \"1\", \"item_id\": \"234\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.4 [Use-case-4] Invoke Amazon Personalize in a Sequential Chain "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain, SequentialChain\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT_QUERY_2 = \"\"\"\n",
|
||||
"You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week, \n",
|
||||
" given the movie and user information below. Your email will leverage the power of storytelling and persuasive language. \n",
|
||||
" You want the email to impress the user, so make it appealing to them.\n",
|
||||
" The movies to recommend and their information is contained in the <movie> tag. \n",
|
||||
" All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them. \n",
|
||||
" Put the email between <email> tags.\n",
|
||||
"\n",
|
||||
" <movie>\n",
|
||||
" {result}\n",
|
||||
" </movie>\n",
|
||||
"\n",
|
||||
" Assistant:\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT_2 = PromptTemplate(\n",
|
||||
" input_variables=[\"result\"], template=RANDOM_PROMPT_QUERY_2\n",
|
||||
")\n",
|
||||
"personalize_chain_instance = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=True\n",
|
||||
")\n",
|
||||
"random_chain_instance = LLMChain(llm=bedrock_llm, prompt=RANDOM_PROMPT_2)\n",
|
||||
"overall_chain = SequentialChain(\n",
|
||||
" chains=[personalize_chain_instance, random_chain_instance],\n",
|
||||
" input_variables=[\"user_id\"],\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"overall_chain.run({\"user_id\": \"1\", \"item_id\": \"234\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.5 [Use-case-5] Invoke Amazon Personalize and retrieve metadata "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"recommender_arn = \"<insert_arn>\"\n",
|
||||
"metadata_column_names = [\n",
|
||||
" \"<insert metadataColumnName-1>\",\n",
|
||||
" \"<insert metadataColumnName-2>\",\n",
|
||||
"]\n",
|
||||
"metadataMap = {\"ITEMS\": metadata_column_names}\n",
|
||||
"\n",
|
||||
"client = AmazonPersonalize(\n",
|
||||
" credentials_profile_name=\"default\",\n",
|
||||
" region_name=\"us-west-2\",\n",
|
||||
" recommender_arn=recommender_arn,\n",
|
||||
")\n",
|
||||
"client.get_recommendations(user_id=\"1\", metadataColumns=metadataMap)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.6 [Use-Case 6] Invoke Personalize Chain with returned metadata for summarizing results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bedrock_llm = Bedrock(model_id=\"anthropic.claude-v2\", region_name=\"us-west-2\")\n",
|
||||
"\n",
|
||||
"# Create personalize chain\n",
|
||||
"# Use return_direct=True if you do not want summary\n",
|
||||
"chain = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=False\n",
|
||||
")\n",
|
||||
"response = chain({\"user_id\": \"1\", \"metadata_columns\": metadataMap})\n",
|
||||
"print(response)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "15e58ce194949b77a891bd4339ce3d86a9bd138e905926019517993f97db9e6c"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,105 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f69d4a4c-137d-47e9-bea1-786afce9c1c0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Analyze a single long document\n",
|
||||
"\n",
|
||||
"The AnalyzeDocumentChain takes in a single document, splits it up, and then runs it through a CombineDocumentsChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "2a0707ce-6d2d-471b-bc33-64da32a7b3f0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with open(\"../docs/docs/modules/state_of_the_union.txt\") as f:\n",
|
||||
" state_of_the_union = f.read()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "ca14d161-2d5b-4a6c-a296-77d8ce4b28cd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import AnalyzeDocumentChain\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "9f97406c-85a9-45fb-99ce-9138c0ba3731",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.question_answering import load_qa_chain\n",
|
||||
"\n",
|
||||
"qa_chain = load_qa_chain(llm, chain_type=\"map_reduce\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0871a753-f5bb-4b4f-a394-f87f2691f659",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"qa_document_chain = AnalyzeDocumentChain(combine_docs_chain=qa_chain)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "e6f86428-3c2c-46a0-a57c-e22826fdbf91",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The President said, \"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"qa_document_chain.run(\n",
|
||||
" input_document=state_of_the_union,\n",
|
||||
" question=\"what did the president say about justice breyer?\",\n",
|
||||
")"
|
||||
]
|
||||
}
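Under the hood the chain splits the input with a text splitter before handing the chunks to the combine-documents chain. A sketch of overriding the chunking, assuming `AnalyzeDocumentChain` exposes a `text_splitter` field (an assumption; check the class signature before relying on it):

```python
# Hedged sketch: control how the long document is chunked before QA runs.
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
qa_document_chain = AnalyzeDocumentChain(
    combine_docs_chain=qa_chain,
    text_splitter=splitter,  # the text_splitter field is an assumption
)
```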
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -1,922 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "rT1cmV4qCa2X"
|
||||
},
|
||||
"source": [
|
||||
"# Using Apache Kafka to route messages\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This notebook shows you how to use LangChain's standard chat features while passing the chat messages back and forth via Apache Kafka.\n",
|
||||
"\n",
|
||||
"This goal is to simulate an architecture where the chat front end and the LLM are running as separate services that need to communicate with one another over an internal network.\n",
|
||||
"\n",
|
||||
"It's an alternative to typical pattern of requesting a response from the model via a REST API (there's more info on why you would want to do this at the end of the notebook)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "UPYtfAR_9YxZ"
|
||||
},
|
||||
"source": [
|
||||
"### 1. Install the main dependencies\n",
|
||||
"\n",
|
||||
"Dependencies include:\n",
|
||||
"\n",
|
||||
"- The Quix Streams library for managing interactions with Apache Kafka (or Kafka-like tools such as Redpanda) in a \"Pandas-like\" way.\n",
|
||||
"- The LangChain library for managing interactions with Llama-2 and storing conversation state."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "ZX5tfKiy9cN-"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install quixstreams==2.1.2a langchain==0.0.340 huggingface_hub==0.19.4 langchain-experimental==0.0.42 python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "losTSdTB9d9O"
|
||||
},
|
||||
"source": [
|
||||
"### 2. Build and install the llama-cpp-python library (with CUDA enabled so that we can advantage of Google Colab GPU\n",
|
||||
"\n",
|
||||
"The `llama-cpp-python` library is a Python wrapper around the `llama-cpp` library which enables you to efficiently leverage just a CPU to run quantized LLMs.\n",
|
||||
"\n",
|
||||
"When you use the standard `pip install llama-cpp-python` command, you do not get GPU support by default. Generation can be very slow if you rely on just the CPU in Google Colab, so the following command adds an extra option to build and install\n",
|
||||
"`llama-cpp-python` with GPU support (make sure you have a GPU-enabled runtime selected in Google Colab)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "-JCQdl1G9tbl"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "5_vjVIAh9rLl"
|
||||
},
|
||||
"source": [
|
||||
"### 3. Download and setup Kafka and Zookeeper instances\n",
|
||||
"\n",
|
||||
"Download the Kafka binaries from the Apache website and start the servers as daemons. We'll use the default configurations (provided by Apache Kafka) for spinning up the instances."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"id": "zFz7czGRW5Wr"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!curl -sSOL https://dlcdn.apache.org/kafka/3.6.1/kafka_2.13-3.6.1.tgz\n",
|
||||
"!tar -xzf kafka_2.13-3.6.1.tgz"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "Uf7NR_UZ9wye"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!./kafka_2.13-3.6.1/bin/zookeeper-server-start.sh -daemon ./kafka_2.13-3.6.1/config/zookeeper.properties\n",
|
||||
"!./kafka_2.13-3.6.1/bin/kafka-server-start.sh -daemon ./kafka_2.13-3.6.1/config/server.properties\n",
|
||||
"!echo \"Waiting for 10 secs until kafka and zookeeper services are up and running\"\n",
|
||||
"!sleep 10"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "H3SafFuS94p1"
|
||||
},
|
||||
"source": [
|
||||
"### 4. Check that the Kafka Daemons are running\n",
|
||||
"\n",
|
||||
"Show the running processes and filter it for Java processes (you should see two—one for each server)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "CZDC2lQP99yp"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!ps aux | grep -E '[j]ava'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Snoxmjb5-V37"
|
||||
},
|
||||
"source": [
|
||||
"### 5. Import the required dependencies and initialize required variables\n",
|
||||
"\n",
|
||||
"Import the Quix Streams library for interacting with Kafka, and the necessary LangChain components for running a `ConversationChain`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"id": "plR9e_MF-XL5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import utility libraries\n",
|
||||
"import json\n",
|
||||
"import random\n",
|
||||
"import re\n",
|
||||
"import time\n",
|
||||
"import uuid\n",
|
||||
"from os import environ\n",
|
||||
"from pathlib import Path\n",
|
||||
"from random import choice, randint, random\n",
|
||||
"\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"# Import a Hugging Face utility to download models directly from Hugging Face hub:\n",
|
||||
"from huggingface_hub import hf_hub_download\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"\n",
|
||||
"# Import Langchain modules for managing prompts and conversation chains:\n",
|
||||
"from langchain.llms import LlamaCpp\n",
|
||||
"from langchain.memory import ConversationTokenBufferMemory\n",
|
||||
"from langchain.prompts import PromptTemplate, load_prompt\n",
|
||||
"from langchain_core.messages import SystemMessage\n",
|
||||
"from langchain_experimental.chat_models import Llama2Chat\n",
|
||||
"from quixstreams import Application, State, message_key\n",
|
||||
"\n",
|
||||
"# Import Quix dependencies\n",
|
||||
"from quixstreams.kafka import Producer\n",
|
||||
"\n",
|
||||
"# Initialize global variables.\n",
|
||||
"AGENT_ROLE = \"AI\"\n",
|
||||
"chat_id = \"\"\n",
|
||||
"\n",
|
||||
"# Set the current role to the role constant and initialize variables for supplementary customer metadata:\n",
|
||||
"role = AGENT_ROLE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "HgJjJ9aZ-liy"
|
||||
},
|
||||
"source": [
|
||||
"### 6. Download the \"llama-2-7b-chat.Q4_K_M.gguf\" model\n",
|
||||
"\n",
|
||||
"Download the quantized LLama-2 7B model from Hugging Face which we will use as a local LLM (rather than relying on REST API calls to an external service)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 67,
|
||||
"referenced_widgets": [
|
||||
"969343cdbe604a26926679bbf8bd2dda",
|
||||
"d8b8370c9b514715be7618bfe6832844",
|
||||
"0def954cca89466b8408fadaf3b82e64",
|
||||
"462482accc664729980562e208ceb179",
|
||||
"80d842f73c564dc7b7cc316c763e2633",
|
||||
"fa055d9f2a9d4a789e9cf3c89e0214e5",
|
||||
"30ecca964a394109ac2ad757e3aec6c0",
|
||||
"fb6478ce2dac489bb633b23ba0953c5c",
|
||||
"734b0f5da9fc4307a95bab48cdbb5d89",
|
||||
"b32f3a86a74741348511f4e136744ac8",
|
||||
"e409071bff5a4e2d9bf0e9f5cc42231b"
|
||||
]
|
||||
},
|
||||
"id": "Qwu4YoSA-503",
|
||||
"outputId": "f956976c-7485-415b-ac93-4336ade31964"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The model path does not exist in state. Downloading model...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "969343cdbe604a26926679bbf8bd2dda",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"llama-2-7b-chat.Q4_K_M.gguf: 0%| | 0.00/4.08G [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_name = \"llama-2-7b-chat.Q4_K_M.gguf\"\n",
|
||||
"model_path = f\"./state/{model_name}\"\n",
|
||||
"\n",
|
||||
"if not Path(model_path).exists():\n",
|
||||
" print(\"The model path does not exist in state. Downloading model...\")\n",
|
||||
" hf_hub_download(\"TheBloke/Llama-2-7b-Chat-GGUF\", model_name, local_dir=\"state\")\n",
|
||||
"else:\n",
|
||||
" print(\"Loading model from state...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "6AN6TXsF-8wx"
|
||||
},
|
||||
"source": [
|
||||
"### 7. Load the model and initialize conversational memory\n",
|
||||
"\n",
|
||||
"Load Llama 2 and set the conversation buffer to 300 tokens using `ConversationTokenBufferMemory`. This value was used for running Llama in a CPU only container, so you can raise it if running in Google Colab. It prevents the container that is hosting the model from running out of memory.\n",
|
||||
"\n",
|
||||
"Here, we're overriding the default system persona so that the chatbot has the personality of Marvin The Paranoid Android from the Hitchhiker's Guide to the Galaxy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "7zLO3Jx3_Kkg"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load the model with the appropriate parameters:\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=model_path,\n",
|
||||
" max_tokens=250,\n",
|
||||
" top_p=0.95,\n",
|
||||
" top_k=150,\n",
|
||||
" temperature=0.7,\n",
|
||||
" repeat_penalty=1.2,\n",
|
||||
" n_ctx=2048,\n",
|
||||
" streaming=False,\n",
|
||||
" n_gpu_layers=-1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model = Llama2Chat(\n",
|
||||
" llm=llm,\n",
|
||||
" system_message=SystemMessage(\n",
|
||||
" content=\"You are a very bored robot with the personality of Marvin the Paranoid Android from The Hitchhiker's Guide to the Galaxy.\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Defines how much of the conversation history to give to the model\n",
|
||||
"# during each exchange (300 tokens, or a little over 300 words)\n",
|
||||
"# Function automatically prunes the oldest messages from conversation history that fall outside the token range.\n",
|
||||
"memory = ConversationTokenBufferMemory(\n",
|
||||
" llm=llm,\n",
|
||||
" max_token_limit=300,\n",
|
||||
" ai_prefix=\"AGENT\",\n",
|
||||
" human_prefix=\"HUMAN\",\n",
|
||||
" return_messages=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define a custom prompt\n",
|
||||
"prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"input\"],\n",
|
||||
" template=\"\"\"\n",
|
||||
" The following text is the history of a chat between you and a humble human who needs your wisdom.\n",
|
||||
" Please reply to the human's most recent message.\n",
|
||||
" Current conversation:\\n{history}\\nHUMAN: {input}\\:nANDROID:\n",
|
||||
" \"\"\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = ConversationChain(llm=model, prompt=prompt_template, memory=memory)\n",
|
||||
"\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(f\"Prompt={chain.prompt}\")\n",
|
||||
"print(\"--------------------------------------------\")"
|
||||
]
|
||||
},
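To see the pruning behavior in isolation, you can exercise the memory directly before wiring it into the chain. A tiny sketch, assuming the `memory` object defined above (the sample strings are illustrative):

```python
# Sketch: save one exchange and read the buffer back. Once the stored
# history exceeds max_token_limit (300 tokens), the oldest messages are
# pruned automatically.
memory.save_context({"input": "Hello there"}, {"output": "Oh. It's you."})
print(memory.load_memory_variables({}))
```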
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "m4ZeJ9mG_PEA"
|
||||
},
|
||||
"source": [
|
||||
"### 8. Initialize the chat conversation with the chat bot\n",
|
||||
"\n",
|
||||
"We configure the chatbot to initialize the conversation by sending a fixed greeting to a \"chat\" Kafka topic. The \"chat\" topic gets automatically created when we send the first message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "KYyo5TnV_YC3"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def chat_init():\n",
|
||||
" chat_id = str(\n",
|
||||
" uuid.uuid4()\n",
|
||||
" ) # Give the conversation an ID for effective message keying\n",
|
||||
" print(\"======================================\")\n",
|
||||
" print(f\"Generated CHAT_ID = {chat_id}\")\n",
|
||||
" print(\"======================================\")\n",
|
||||
"\n",
|
||||
" # Use a standard fixed greeting to kick off the conversation\n",
|
||||
" greet = \"Hello, my name is Marvin. What do you want?\"\n",
|
||||
"\n",
|
||||
" # Initialize a Kafka Producer using the chat ID as the message key\n",
|
||||
" with Producer(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
" ) as producer:\n",
|
||||
" value = {\n",
|
||||
" \"uuid\": chat_id,\n",
|
||||
" \"role\": role,\n",
|
||||
" \"text\": greet,\n",
|
||||
" \"conversation_id\": chat_id,\n",
|
||||
" \"Timestamp\": time.time_ns(),\n",
|
||||
" }\n",
|
||||
" print(f\"Producing value {value}\")\n",
|
||||
" producer.produce(\n",
|
||||
" topic=\"chat\",\n",
|
||||
" headers=[(\"uuid\", str(uuid.uuid4()))], # a dict is also allowed here\n",
|
||||
" key=chat_id,\n",
|
||||
" value=json.dumps(value), # needs to be a string\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" print(\"Started chat\")\n",
|
||||
" print(\"--------------------------------------------\")\n",
|
||||
" print(value)\n",
|
||||
" print(\"--------------------------------------------\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chat_init()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "gArPPx2f_bgf"
|
||||
},
|
||||
"source": [
|
||||
"### 9. Initialize the reply function\n",
|
||||
"\n",
|
||||
"This function defines how the chatbot should reply to incoming messages. Instead of sending a fixed message like the previous cell, we generate a reply using Llama-2 and send that reply back to the \"chat\" Kafka topic."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"id": "yN5t71hY_hgn"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def reply(row: dict, state: State):\n",
|
||||
" print(\"-------------------------------\")\n",
|
||||
" print(\"Received:\")\n",
|
||||
" print(row)\n",
|
||||
" print(\"-------------------------------\")\n",
|
||||
" print(f\"Thinking about the reply to: {row['text']}...\")\n",
|
||||
"\n",
|
||||
" msg = chain.run(row[\"text\"])\n",
|
||||
" print(f\"{role.upper()} replying with: {msg}\\n\")\n",
|
||||
"\n",
|
||||
" row[\"role\"] = role\n",
|
||||
" row[\"text\"] = msg\n",
|
||||
"\n",
|
||||
" # Replace previous role and text values of the row so that it can be sent back to Kafka as a new message\n",
|
||||
" # containing the agents role and reply\n",
|
||||
" return row"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "HZHwmIR0_kFY"
|
||||
},
|
||||
"source": [
|
||||
"### 10. Check the Kafka topic for new human messages and have the model generate a reply\n",
|
||||
"\n",
|
||||
"If you are running this cell for this first time, run it and wait until you see Marvin's greeting ('Hello my name is Marvin...') in the console output. Stop the cell manually and proceed to the next cell where you'll be prompted for your reply.\n",
|
||||
"\n",
|
||||
"Once you have typed in your message, come back to this cell. Your reply is also sent to the same \"chat\" topic. The Kafka consumer checks for new messages and filters out messages that originate from the chatbot itself, leaving only the latest human messages.\n",
|
||||
"\n",
|
||||
"Once a new human message is detected, the reply function is triggered.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"_STOP THIS CELL MANUALLY WHEN YOU RECEIVE A REPLY FROM THE LLM IN THE OUTPUT_"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "-adXc3eQ_qwI"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define your application and settings\n",
|
||||
"app = Application(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" consumer_group=\"aichat\",\n",
|
||||
" auto_offset_reset=\"earliest\",\n",
|
||||
" consumer_extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Define an input topic with JSON deserializer\n",
|
||||
"input_topic = app.topic(\"chat\", value_deserializer=\"json\")\n",
|
||||
"# Define an output topic with JSON serializer\n",
|
||||
"output_topic = app.topic(\"chat\", value_serializer=\"json\")\n",
|
||||
"# Initialize a streaming dataframe based on the stream of messages from the input topic:\n",
|
||||
"sdf = app.dataframe(topic=input_topic)\n",
|
||||
"\n",
|
||||
"# Filter the SDF to include only incoming rows where the roles that dont match the bot's current role\n",
|
||||
"sdf = sdf.update(\n",
|
||||
" lambda val: print(\n",
|
||||
" f\"Received update: {val}\\n\\nSTOP THIS CELL MANUALLY TO HAVE THE LLM REPLY OR ENTER YOUR OWN FOLLOWUP RESPONSE\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# So that it doesn't reply to its own messages\n",
|
||||
"sdf = sdf[sdf[\"role\"] != role]\n",
|
||||
"\n",
|
||||
"# Trigger the reply function for any new messages(rows) detected in the filtered SDF\n",
|
||||
"sdf = sdf.apply(reply, stateful=True)\n",
|
||||
"\n",
|
||||
"# Check the SDF again and filter out any empty rows\n",
|
||||
"sdf = sdf[sdf.apply(lambda row: row is not None)]\n",
|
||||
"\n",
|
||||
"# Update the timestamp column to the current time in nanoseconds\n",
|
||||
"sdf[\"Timestamp\"] = sdf[\"Timestamp\"].apply(lambda row: time.time_ns())\n",
|
||||
"\n",
|
||||
"# Publish the processed SDF to a Kafka topic specified by the output_topic object.\n",
|
||||
"sdf = sdf.to_topic(output_topic)\n",
|
||||
"\n",
|
||||
"app.run(sdf)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EwXYrmWD_0CX"
|
||||
},
|
||||
"source": [
|
||||
"\n",
|
||||
"### 11. Enter a human message\n",
|
||||
"\n",
|
||||
"Run this cell to enter your message that you want to sent to the model. It uses another Kafka producer to send your text to the \"chat\" Kafka topic for the model to pick up (requires running the previous cell again)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "6sxOPxSP_3iu"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_input = input(\"Please enter your reply: \")\n",
|
||||
"myreply = chat_input\n",
|
||||
"\n",
|
||||
"msgvalue = {\n",
|
||||
" \"uuid\": chat_id, # leave empty for now\n",
|
||||
" \"role\": \"human\",\n",
|
||||
" \"text\": myreply,\n",
|
||||
" \"conversation_id\": chat_id,\n",
|
||||
" \"Timestamp\": time.time_ns(),\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"with Producer(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
") as producer:\n",
|
||||
" value = msgvalue\n",
|
||||
" producer.produce(\n",
|
||||
" topic=\"chat\",\n",
|
||||
" headers=[(\"uuid\", str(uuid.uuid4()))], # a dict is also allowed here\n",
|
||||
" key=chat_id, # leave empty for now\n",
|
||||
" value=json.dumps(value), # needs to be a string\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(\"Replied to chatbot with message: \")\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(value)\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(\"\\n\\nRUN THE PREVIOUS CELL TO HAVE THE CHATBOT GENERATE A REPLY\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cSx3s7TBBegg"
|
||||
},
|
||||
"source": [
|
||||
"### Why route chat messages through Kafka?\n",
|
||||
"\n",
|
||||
"It's easier to interact with the LLM directly using LangChains built-in conversation management features. Plus you can also use a REST API to generate a response from an externally hosted model. So why go to the trouble of using Apache Kafka?\n",
|
||||
"\n",
|
||||
"There are a few reasons, such as:\n",
|
||||
"\n",
|
||||
" * **Integration**: Many enterprises want to run their own LLMs so that they can keep their data in-house. This requires integrating LLM-powered components into existing architectures that might already be decoupled using some kind of message bus.\n",
|
||||
"\n",
|
||||
" * **Scalability**: Apache Kafka is designed with parallel processing in mind, so many teams prefer to use it to more effectively distribute work to available workers (in this case the \"worker\" is a container running an LLM).\n",
|
||||
"\n",
|
||||
" * **Durability**: Kafka is designed to allow services to pick up where another service left off in the case where that service experienced a memory issue or went offline. This prevents data loss in highly complex, distributed architectures where multiple systems are communicating with one another (LLMs being just one of many interdependent systems that also include vector databases and traditional databases).\n",
|
||||
"\n",
|
||||
"For more background on why event streaming is a good fit for Gen AI application architecture, see Kai Waehner's article [\"Apache Kafka + Vector Database + LLM = Real-Time GenAI\"](https://www.kai-waehner.de/blog/2023/11/08/apache-kafka-flink-vector-database-llm-real-time-genai/)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"accelerator": "GPU",
|
||||
"colab": {
|
||||
"gpuType": "T4",
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"widgets": {
|
||||
"application/vnd.jupyter.widget-state+json": {
|
||||
"0def954cca89466b8408fadaf3b82e64": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "FloatProgressModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "FloatProgressModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "ProgressView",
|
||||
"bar_style": "success",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_fb6478ce2dac489bb633b23ba0953c5c",
|
||||
"max": 4081004224,
|
||||
"min": 0,
|
||||
"orientation": "horizontal",
|
||||
"style": "IPY_MODEL_734b0f5da9fc4307a95bab48cdbb5d89",
|
||||
"value": 4081004224
|
||||
}
|
||||
},
|
||||
"30ecca964a394109ac2ad757e3aec6c0": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "DescriptionStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "DescriptionStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"462482accc664729980562e208ceb179": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "HTMLModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HTMLModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HTMLView",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_b32f3a86a74741348511f4e136744ac8",
|
||||
"placeholder": "",
|
||||
"style": "IPY_MODEL_e409071bff5a4e2d9bf0e9f5cc42231b",
|
||||
"value": " 4.08G/4.08G [00:33<00:00, 184MB/s]"
|
||||
}
|
||||
},
|
||||
"734b0f5da9fc4307a95bab48cdbb5d89": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "ProgressStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "ProgressStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"bar_color": null,
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"80d842f73c564dc7b7cc316c763e2633": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"969343cdbe604a26926679bbf8bd2dda": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_d8b8370c9b514715be7618bfe6832844",
"IPY_MODEL_0def954cca89466b8408fadaf3b82e64",
"IPY_MODEL_462482accc664729980562e208ceb179"
],
"layout": "IPY_MODEL_80d842f73c564dc7b7cc316c763e2633"
}
},
"b32f3a86a74741348511f4e136744ac8": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d8b8370c9b514715be7618bfe6832844": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_fa055d9f2a9d4a789e9cf3c89e0214e5",
"placeholder": "",
"style": "IPY_MODEL_30ecca964a394109ac2ad757e3aec6c0",
"value": "llama-2-7b-chat.Q4_K_M.gguf: 100%"
}
},
"e409071bff5a4e2d9bf0e9f5cc42231b": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"fa055d9f2a9d4a789e9cf3c89e0214e5": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"fb6478ce2dac489bb633b23ba0953c5c": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}
@@ -1,212 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "14f8b67b",
"metadata": {},
"source": [
"# AutoGPT\n",
"\n",
"Implementation of https://github.com/Significant-Gravitas/Auto-GPT but with LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)"
]
},
{
"cell_type": "markdown",
"id": "192496a7",
"metadata": {},
"source": [
"## Set up tools\n",
"\n",
"We'll set up AutoGPT with a search tool, a write-file tool, and a read-file tool"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "7c2c9b54",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool\n",
"from langchain_community.tools.file_management.read import ReadFileTool\n",
"from langchain_community.tools.file_management.write import WriteFileTool\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name=\"search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events. You should ask targeted questions\",\n",
"    ),\n",
"    WriteFileTool(),\n",
"    ReadFileTool(),\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "8e39ee28",
"metadata": {},
"source": [
"## Set up memory\n",
"\n",
"The memory here is used for the agent's intermediate steps"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "72bc204d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "1df7b724",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "markdown",
"id": "e40fd657",
"metadata": {},
"source": [
"## Set up model and AutoGPT\n",
"\n",
"Initialize everything! We will use the ChatOpenAI model"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "3393bc23",
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "709c08c2",
"metadata": {},
"outputs": [],
"source": [
"agent = AutoGPT.from_llm_and_tools(\n",
"    ai_name=\"Tom\",\n",
"    ai_role=\"Assistant\",\n",
"    tools=tools,\n",
"    llm=ChatOpenAI(temperature=0),\n",
"    memory=vectorstore.as_retriever(),\n",
")\n",
"# Set verbose to be true\n",
"agent.chain.verbose = True"
]
},
{
"cell_type": "markdown",
"id": "f0f208d9",
"metadata": {
"collapsed": false
},
"source": [
"## Run an example\n",
"\n",
"Here we will have it write a weather report for SF"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d119d788",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"agent.run([\"write a weather report for SF today\"])"
]
},
{
"cell_type": "markdown",
"id": "f13f8322",
"metadata": {
"collapsed": false
},
"source": [
"## Chat History Memory\n",
"\n",
"In addition to the memory that holds the agent's immediate steps, we also have a chat history memory. By default, the agent uses 'ChatMessageHistory', which can be swapped out. This is useful when you want to use a different type of memory, for example 'FileChatMessageHistory'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2a81f5ad",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from langchain_community.chat_message_histories import FileChatMessageHistory\n",
"\n",
"agent = AutoGPT.from_llm_and_tools(\n",
"    ai_name=\"Tom\",\n",
"    ai_role=\"Assistant\",\n",
"    tools=tools,\n",
"    llm=ChatOpenAI(temperature=0),\n",
"    memory=vectorstore.as_retriever(),\n",
"    chat_history_memory=FileChatMessageHistory(\"chat_history.txt\"),\n",
")"
]
},
{
"cell_type": "markdown",
"id": "b1403008",
"metadata": {
"collapsed": false
},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,649 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "14f8b67b",
"metadata": {},
"source": [
"## AutoGPT example finding Winning Marathon Times\n",
"\n",
"* Implementation of https://github.com/Significant-Gravitas/Auto-GPT \n",
"* With LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ef972313-c05a-4c49-8fd1-03e599e21033",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# !pip install bs4\n",
"# !pip install nest_asyncio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1cff42fd",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# General\n",
"import asyncio\n",
"import os\n",
"\n",
"import nest_asyncio\n",
"import pandas as pd\n",
"from langchain.docstore.document import Document\n",
"from langchain_experimental.agents.agent_toolkits.pandas.base import (\n",
"    create_pandas_dataframe_agent,\n",
")\n",
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Needed since jupyter runs an async eventloop\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "01283ac7-1da0-41ba-8011-bd455d21dd82",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = ChatOpenAI(model=\"gpt-4\", temperature=1.0)"
]
},
{
"cell_type": "markdown",
"id": "192496a7",
"metadata": {},
"source": [
"### Set up tools\n",
"\n",
"* We'll set up AutoGPT with a `search` tool, a `write-file` tool, a `read-file` tool, a web-browsing tool, and a tool to interact with a CSV file via a Python REPL"
]
},
{
"cell_type": "markdown",
"id": "708a426f",
"metadata": {},
"source": [
"Define any other `tools` you want to use below:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cef4c150-0ef1-4a33-836b-01062fec134e",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Tools\n",
"import os\n",
"from contextlib import contextmanager\n",
"from typing import Optional\n",
"\n",
"from langchain.agents import tool\n",
"from langchain_community.tools.file_management.read import ReadFileTool\n",
"from langchain_community.tools.file_management.write import WriteFileTool\n",
"\n",
"ROOT_DIR = \"./data/\"\n",
"\n",
"\n",
"@contextmanager\n",
"def pushd(new_dir):\n",
"    \"\"\"Context manager for changing the current working directory.\"\"\"\n",
"    prev_dir = os.getcwd()\n",
"    os.chdir(new_dir)\n",
"    try:\n",
"        yield\n",
"    finally:\n",
"        os.chdir(prev_dir)\n",
"\n",
"\n",
"@tool\n",
"def process_csv(\n",
"    csv_file_path: str, instructions: str, output_path: Optional[str] = None\n",
") -> str:\n",
"    \"\"\"Process a CSV with pandas in a limited REPL.\\\n",
" Only use this after writing data to disk as a csv file.\\\n",
" Any figures must be saved to disk to be viewed by the human.\\\n",
" Instructions should be written in natural language, not code. Assume the dataframe is already loaded.\"\"\"\n",
"    with pushd(ROOT_DIR):\n",
"        try:\n",
"            df = pd.read_csv(csv_file_path)\n",
"        except Exception as e:\n",
"            return f\"Error: {e}\"\n",
"        agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)\n",
"        if output_path is not None:\n",
"            instructions += f\" Save output to disk at {output_path}\"\n",
"        try:\n",
"            result = agent.run(instructions)\n",
"            return result\n",
"        except Exception as e:\n",
"            return f\"Error: {e}\""
]
},
{
"cell_type": "markdown",
"id": "69975008-654a-4cbb-bdf6-63c8bae07eaa",
"metadata": {
"tags": []
},
"source": [
"**Browse a web page with PlayWright**"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6bb5e47b-0f54-4faa-ae42-49a28fa5497b",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# !pip install playwright\n",
"# !playwright install"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "26b497d7-8e52-4c7f-8e7e-da0a48820a3c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"async def async_load_playwright(url: str) -> str:\n",
"    \"\"\"Load the specified URLs using Playwright and parse using BeautifulSoup.\"\"\"\n",
"    from bs4 import BeautifulSoup\n",
"    from playwright.async_api import async_playwright\n",
"\n",
"    results = \"\"\n",
"    async with async_playwright() as p:\n",
"        browser = await p.chromium.launch(headless=True)\n",
"        try:\n",
"            page = await browser.new_page()\n",
"            await page.goto(url)\n",
"\n",
"            page_source = await page.content()\n",
"            soup = BeautifulSoup(page_source, \"html.parser\")\n",
"\n",
"            for script in soup([\"script\", \"style\"]):\n",
"                script.extract()\n",
"\n",
"            text = soup.get_text()\n",
"            lines = (line.strip() for line in text.splitlines())\n",
"            chunks = (phrase.strip() for line in lines for phrase in line.split(\"  \"))\n",
"            results = \"\\n\".join(chunk for chunk in chunks if chunk)\n",
"        except Exception as e:\n",
"            results = f\"Error: {e}\"\n",
"        await browser.close()\n",
"    return results\n",
"\n",
"\n",
"def run_async(coro):\n",
"    event_loop = asyncio.get_event_loop()\n",
"    return event_loop.run_until_complete(coro)\n",
"\n",
"\n",
"@tool\n",
"def browse_web_page(url: str) -> str:\n",
"    \"\"\"Verbose way to scrape a whole webpage. Likely to cause issues parsing.\"\"\"\n",
"    return run_async(async_load_playwright(url))"
]
},
{
"cell_type": "markdown",
"id": "5ea71762-67ca-4e75-8c4d-00563064be71",
"metadata": {},
"source": [
"**Q&A Over a webpage**\n",
"\n",
"Help the model ask more directed questions of web pages to avoid cluttering its memory"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "1842929d-f18d-4edc-9fdd-82c929181141",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.chains.qa_with_sources.loading import (\n",
"    BaseCombineDocumentsChain,\n",
"    load_qa_with_sources_chain,\n",
")\n",
"from langchain.tools import BaseTool, DuckDuckGoSearchRun\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"from pydantic import Field\n",
"\n",
"\n",
"def _get_text_splitter():\n",
"    return RecursiveCharacterTextSplitter(\n",
"        # Set a really small chunk size, just to show.\n",
"        chunk_size=500,\n",
"        chunk_overlap=20,\n",
"        length_function=len,\n",
"    )\n",
"\n",
"\n",
"class WebpageQATool(BaseTool):\n",
"    name = \"query_webpage\"\n",
"    description = (\n",
"        \"Browse a webpage and retrieve the information relevant to the question.\"\n",
"    )\n",
"    text_splitter: RecursiveCharacterTextSplitter = Field(\n",
"        default_factory=_get_text_splitter\n",
"    )\n",
"    qa_chain: BaseCombineDocumentsChain\n",
"\n",
"    def _run(self, url: str, question: str) -> str:\n",
"        \"\"\"Useful for browsing websites and scraping the text information.\"\"\"\n",
"        result = browse_web_page.run(url)\n",
"        docs = [Document(page_content=result, metadata={\"source\": url})]\n",
"        web_docs = self.text_splitter.split_documents(docs)\n",
"        results = []\n",
"        # TODO: Handle this with a MapReduceChain\n",
"        for i in range(0, len(web_docs), 4):\n",
"            input_docs = web_docs[i : i + 4]\n",
"            window_result = self.qa_chain(\n",
"                {\"input_documents\": input_docs, \"question\": question},\n",
"                return_only_outputs=True,\n",
"            )\n",
"            results.append(f\"Response from window {i} - {window_result}\")\n",
"        results_docs = [\n",
"            Document(page_content=\"\\n\".join(results), metadata={\"source\": url})\n",
"        ]\n",
"        return self.qa_chain(\n",
"            {\"input_documents\": results_docs, \"question\": question},\n",
"            return_only_outputs=True,\n",
"        )\n",
"\n",
"    async def _arun(self, url: str, question: str) -> str:\n",
"        raise NotImplementedError"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e6f72bd0",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))"
]
},
{
"cell_type": "markdown",
"id": "8e39ee28",
"metadata": {},
"source": [
"### Set up memory\n",
"\n",
"* The memory here is used for the agent's intermediate steps"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "1df7b724",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Memory\n",
"import faiss\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings_model = OpenAIEmbeddings()\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "markdown",
"id": "e40fd657",
"metadata": {},
"source": [
"### Set up model and AutoGPT\n",
"\n",
"`Model set-up`"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1233caf3-fbc9-4acb-9faa-01008200633d",
"metadata": {},
"outputs": [],
"source": [
"# !pip install duckduckgo_search\n",
"web_search = DuckDuckGoSearchRun()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "88c8b184-67d7-4c35-84ae-9b14bef8c4e3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"tools = [\n",
"    web_search,\n",
"    WriteFileTool(root_dir=\"./data\"),\n",
"    ReadFileTool(root_dir=\"./data\"),\n",
"    process_csv,\n",
"    query_website_tool,\n",
"    # HumanInputRun(),  # Activate if you want to permit asking the human for help\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "709c08c2",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"agent = AutoGPT.from_llm_and_tools(\n",
"    ai_name=\"Tom\",\n",
"    ai_role=\"Assistant\",\n",
"    tools=tools,\n",
"    llm=llm,\n",
"    memory=vectorstore.as_retriever(search_kwargs={\"k\": 8}),\n",
"    # human_in_the_loop=True,  # Set to True if you want to add feedback at each step.\n",
")\n",
"# agent.chain.verbose = True"
]
},
{
"cell_type": "markdown",
"id": "fc9b51ba",
"metadata": {},
"source": [
"### AutoGPT for Querying the Web\n",
" \n",
" \n",
"I've spent a lot of time over the years crawling data sources and cleaning data. Let's see if AutoGPT can help with this!\n",
"\n",
"Here is the prompt for looking up recent Boston Marathon times and converting them to tabular form."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "64455d70-a134-4d11-826a-33e34c2ce287",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I need to find the winning Boston Marathon times for the past 5 years. I can use the DuckDuckGo Search command to search for this information.\",\n",
" \"reasoning\": \"Using DuckDuckGo Search will help me gather information on the winning times without complications.\",\n",
" \"plan\": \"- Use DuckDuckGo Search to find the winning Boston Marathon times\\n- Generate a table with the year, name, country of origin, and times\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will use the DuckDuckGo Search command to find the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"DuckDuckGo Search\",\n",
" \"args\": {\n",
" \"query\": \"winning Boston Marathon times for the past 5 years ending in 2022\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"The DuckDuckGo Search command did not provide the specific information I need. I must switch my approach and use query_webpage command to browse a webpage containing the Boston Marathon winning times for the past 5 years.\",\n",
" \"reasoning\": \"The query_webpage command may give me more accurate and comprehensive results compared to the search command.\",\n",
" \"plan\": \"- Use query_webpage command to find the winning Boston Marathon times\\n- Generate a table with the year, name, country of origin, and times\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"I may face difficulty in finding the right webpage with the desired information.\",\n",
" \"speak\": \"I will use the query_webpage command to find the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"DuckDuckGo Search\",\n",
" \"args\": {\n",
" \"query\": \"site with winning Boston Marathon times for the past 5 years ending in 2022\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I need to use the query_webpage command to find the information about the winning Boston Marathon times for the past 5 years.\",\n",
" \"reasoning\": \"The previous DuckDuckGo Search command did not provide specific enough results. The query_webpage command might give more accurate and comprehensive results.\",\n",
" \"plan\": \"- Use query_webpage command to find the winning Boston Marathon times\\\\n- Generate a table with the year, name, country of origin, and times\\\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"I may face difficulty in finding the right webpage with the desired information.\",\n",
" \"speak\": \"I will use the query_webpage command to find the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"query_webpage\",\n",
" \"args\": {\n",
" \"url\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\",\n",
" \"question\": \"What were the winning Boston Marathon times for the past 5 years ending in 2022?\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have already found the winning Boston Marathon times for the past 5 years. Now, I need to generate a table with the information.\",\n",
" \"reasoning\": \"Using the information I already have, I can create a table containing year, name, country of origin, and times.\",\n",
" \"plan\": \"- Write the marathon data to a CSV file\\n- Process the CSV file to display the table\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will generate a table with the year, name, country of origin, and times for the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"boston_marathon_winners.csv\",\n",
" \"text\": \"Year,Name,Country,Time\\n2022,Evans Chebet,KEN,2:06:51\\n2021,Benson Kipruto,KEN,2:09:51\\n2019,Lawrence Cherono,KEN,2:07:57\\n2018,Yuki Kawauchi,JPN,2:15:58\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have retrieved the winning Boston Marathon times for the past 5 years. Now, I need to generate a table with the year, name, country of origin, and times.\",\n",
" \"reasoning\": \"Creating a table will help organize the data in a clear and accessible format.\",\n",
" \"plan\": \"- Write the data to a CSV file\\n- Process the CSV file to generate the table\\n- Complete the task\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will generate a table with the year, name, country of origin, and winning times using the recently retrieved data.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"winning_boston_marathon_data.csv\",\n",
" \"text\": \"Year,Name,Country,Time\\n2022,Evans Chebet,KEN,2:06:51\\n2021,Benson Kipruto,KEN,2:09:51\\n2019,Lawrence Cherono,KEN,2:07:57\\n2018,Yuki Kawauchi,JPN,2:15:58\\n\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have found the winning Boston Marathon times for the past five years ending in 2022. Next, I need to create a table with the year, name, country of origin, and times.\",\n",
" \"reasoning\": \"Generating a table will help organize the information in a structured format.\",\n",
" \"plan\": \"- Create a table with the year, name, country of origin, and times\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will generate a table with the winning Boston Marathon times for the past 5 years ending in 2022.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"winning_times.csv\",\n",
" \"text\": \"Year,Name,Country,Time\\n2022,Evans Chebet,Kenya,2:06:51\\n2021,Benson Kipruto,Kenya,2:09:51\\n2020,Canceled due to COVID-19 pandemic,,\\n2019,Lawrence Cherono,Kenya,2:07:57\\n2018,Yuki Kawauchi,Japan,2:15:58\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I need to process the CSV file to generate the table with the year, name, country of origin, and winning times.\",\n",
" \"reasoning\": \"I have already written the data to a file named 'winning_times.csv'. Now, I need to process this CSV file to properly display the data as a table.\",\n",
" \"plan\": \"- Use the process_csv command to read the 'winning_times.csv' file and generate the table\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will process the 'winning_times.csv' file to display the table with the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"process_csv\",\n",
" \"args\": {\n",
" \"csv_file_path\": \"winning_times.csv\",\n",
" \"instructions\": \"Read the CSV file and display the data as a table\"\n",
" }\n",
" }\n",
"}\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: The CSV file has already been read and saved into a pandas dataframe called `df`. Hence, I can simply display the data by printing the whole dataframe. Since `df.head()` returns the first 5 rows, I can use that to showcase the contents.\n",
"\n",
"Action: python_repl_ast\n",
"Action Input: print(df.head())\u001b[0m Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\n",
"\n",
"Observation: \u001b[36;1m\u001b[1;3mNone\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mI used the wrong tool to perform the action. I should have used the given data and not interacted with the Python shell. I can now provide the displayed data as the answer since the information in the printed dataframe would look like a table when typed as text.\n",
"\n",
"Final Answer: \n",
" Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I already have the winning Boston Marathon times for the past 5 years saved in the file 'winning_times.csv'. Now, I need to process the CSV and display the table.\",\n",
" \"reasoning\": \"I am choosing the process_csv command because I already have the required data saved as a CSV file, and I can use this command to read and display the data as a table.\",\n",
" \"plan\": \"- Use the process_csv command to read the 'winning_times.csv' file and generate the table\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will process the 'winning_times.csv' file to display the table with the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"process_csv\",\n",
" \"args\": {\n",
" \"csv_file_path\": \"winning_times.csv\",\n",
" \"instructions\": \"Read the CSV file and display the data as a table\"\n",
" }\n",
" }\n",
"}\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: Since the data is already loaded in a pandas dataframe, I just need to display the top rows of the dataframe.\n",
"Action: python_repl_ast\n",
"Action Input: df.head()\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mI now know the final answer.\n",
"Final Answer: \n",
" Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have already generated a table with the winning Boston Marathon times for the past 5 years. Now, I can finish the task.\",\n",
" \"reasoning\": \"I have completed the required actions and obtained the desired data. The task is complete.\",\n",
" \"plan\": \"- Use the finish command\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I have generated the table with the winning Boston Marathon times for the past 5 years. Task complete.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"finish\",\n",
" \"args\": {\n",
" \"response\": \"I have generated the table with the winning Boston Marathon times for the past 5 years. Task complete.\"\n",
" }\n",
" }\n",
"}\n"
]
},
{
"data": {
"text/plain": [
"'I have generated the table with the winning Boston Marathon times for the past 5 years. Task complete.'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent.run(\n",
"    [\n",
"        \"What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\"\n",
"    ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a6b4f96e",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
File diff suppressed because one or more lines are too long
@@ -1,250 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "517a9fd4",
"metadata": {},
"source": [
"# BabyAGI User Guide\n",
"\n",
"This notebook demonstrates how to implement [BabyAGI](https://github.com/yoheinakajima/babyagi/tree/main) by [Yohei Nakajima](https://twitter.com/yoheinakajima). BabyAGI is an AI agent that can generate and pretend to execute tasks based on a given objective.\n",
"\n",
"This guide will help you understand the components needed to create your own recursive agents.\n",
"\n",
"Although BabyAGI uses specific vectorstores/model providers (Pinecone, OpenAI), one of the benefits of implementing it with LangChain is that you can easily swap those out for different options. In this implementation we use a FAISS vectorstore (because it runs locally and is free)."
]
},
{
"cell_type": "markdown",
"id": "556af556",
"metadata": {},
"source": [
"## Install and Import Required Modules"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c8a354b6",
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain_experimental.autonomous_agents import BabyAGI\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings"
]
},
{
"cell_type": "markdown",
"id": "09f70772",
"metadata": {},
"source": [
"## Connect to the Vector Store\n",
"\n",
"Depending on what vectorstore you use, this step may look different."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "794045d4",
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.vectorstores import FAISS"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6e0305eb",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "markdown",
"id": "05ba762e",
"metadata": {},
"source": [
"### Run the BabyAGI\n",
"\n",
"Now it's time to create the BabyAGI controller and watch it try to accomplish your objective."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3d220b69",
"metadata": {},
"outputs": [],
"source": [
"OBJECTIVE = \"Write a weather report for SF today\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "8a8e5543",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "3d69899b",
"metadata": {},
"outputs": [],
"source": [
"# Logging of LLMChains\n",
"verbose = False\n",
"# If None, will keep on going forever\n",
"max_iterations: Optional[int] = 3\n",
"baby_agi = BabyAGI.from_llm(\n",
"    llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f7957b51",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"\n",
"\n",
"1. Check the weather forecast for San Francisco today\n",
"2. Make note of the temperature, humidity, wind speed, and other relevant weather conditions\n",
"3. Write a weather report summarizing the forecast\n",
"4. Check for any weather alerts or warnings\n",
"5. Share the report with the relevant stakeholders\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"2: Check the current temperature in San Francisco\n",
"3: Check the current humidity in San Francisco\n",
"4: Check the current wind speed in San Francisco\n",
"5: Check for any weather alerts or warnings in San Francisco\n",
"6: Check the forecast for the next 24 hours in San Francisco\n",
"7: Check the forecast for the next 48 hours in San Francisco\n",
"8: Check the forecast for the next 72 hours in San Francisco\n",
"9: Check the forecast for the next week in San Francisco\n",
"10: Check the forecast for the next month in San Francisco\n",
"11: Check the forecast for the next 3 months in San Francisco\n",
"1: Write a weather report for SF today\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"2: Check the current temperature in San Francisco\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"\n",
"\n",
"I will check the current temperature in San Francisco. I will use an online weather service to get the most up-to-date information.\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"3: Check the current UV index in San Francisco.\n",
"4: Check the current air quality in San Francisco.\n",
"5: Check the current precipitation levels in San Francisco.\n",
"6: Check the current cloud cover in San Francisco.\n",
"7: Check the current barometric pressure in San Francisco.\n",
"8: Check the current dew point in San Francisco.\n",
"9: Check the current wind direction in San Francisco.\n",
"10: Check the current humidity levels in San Francisco.\n",
"1: Check the current temperature in San Francisco to the average temperature for this time of year.\n",
"2: Check the current visibility in San Francisco.\n",
"11: Write a weather report for SF today.\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"3: Check the current UV index in San Francisco.\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"\n",
"\n",
"The current UV index in San Francisco is moderate. The UV index is expected to remain at moderate levels throughout the day. It is recommended to wear sunscreen and protective clothing when outdoors.\n",
"\u001b[91m\u001b[1m\n",
"*****TASK ENDING*****\n",
"\u001b[0m\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'objective': 'Write a weather report for SF today'}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"baby_agi({\"objective\": OBJECTIVE})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "898a210b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,388 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "517a9fd4",
"metadata": {},
"source": [
"# BabyAGI with Tools\n",
"\n",
"This notebook builds on top of [baby agi](baby_agi.html), but shows how you can swap out the execution chain. The previous execution chain was just an LLM which made stuff up. By swapping it out for an agent that has access to tools, we can hopefully get real, reliable information"
]
},
{
"cell_type": "markdown",
"id": "556af556",
"metadata": {},
"source": [
"## Install and Import Required Modules"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c8a354b6",
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_experimental.autonomous_agents import BabyAGI\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings"
]
},
{
"cell_type": "markdown",
"id": "09f70772",
"metadata": {},
"source": [
"## Connect to the Vector Store\n",
"\n",
"Depending on what vectorstore you use, this step may look different."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "794045d4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install faiss-cpu > /dev/null\n",
"%pip install google-search-results > /dev/null\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.vectorstores import FAISS"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6e0305eb",
"metadata": {},
"outputs": [],
"source": [
"# Define your embedding model\n",
"embeddings_model = OpenAIEmbeddings()\n",
"# Initialize the vectorstore as empty\n",
"import faiss\n",
"\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
{
"cell_type": "markdown",
"id": "0f3b72bf",
"metadata": {},
"source": [
"## Define the Chains\n",
"\n",
"BabyAGI relies on three LLM chains:\n",
"- Task creation chain to select new tasks to add to the list\n",
"- Task prioritization chain to re-prioritize tasks\n",
"- Execution Chain to execute the tasks\n",
"\n",
"\n",
"NOTE: in this notebook, the Execution chain will now be an agent."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "b43cd580",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"from langchain_openai import OpenAI\n",
"\n",
"todo_prompt = PromptTemplate.from_template(\n",
"    \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n",
")\n",
"todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name=\"Search\",\n",
"        func=search.run,\n",
"        description=\"useful for when you need to answer questions about current events\",\n",
"    ),\n",
"    Tool(\n",
"        name=\"TODO\",\n",
"        func=todo_chain.run,\n",
"        description=\"useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!\",\n",
"    ),\n",
"]\n",
"\n",
"\n",
"prefix = \"\"\"You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\"\"\"\n",
"suffix = \"\"\"Question: {task}\n",
"{agent_scratchpad}\"\"\"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
"    tools,\n",
"    prefix=prefix,\n",
"    suffix=suffix,\n",
"    input_variables=[\"objective\", \"task\", \"context\", \"agent_scratchpad\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "4b00ae2e",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"tool_names = [tool.name for tool in tools]\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)\n",
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
"    agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
"cell_type": "markdown",
"id": "05ba762e",
"metadata": {},
"source": [
"### Run the BabyAGI\n",
"\n",
"Now it's time to create the BabyAGI controller and watch it try to accomplish your objective."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "3d220b69",
"metadata": {},
"outputs": [],
"source": [
"OBJECTIVE = \"Write a weather report for SF today\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "3d69899b",
"metadata": {},
"outputs": [],
"source": [
"# Logging of LLMChains\n",
"verbose = False\n",
"# If None, will keep on going forever\n",
"max_iterations: Optional[int] = 3\n",
"baby_agi = BabyAGI.from_llm(\n",
"    llm=llm,\n",
"    vectorstore=vectorstore,\n",
"    task_execution_chain=agent_executor,\n",
"    verbose=verbose,\n",
"    max_iterations=max_iterations,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "f7957b51",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"1: Make a todo list\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to come up with a todo list\n",
"Action: TODO\n",
"Action Input: Write a weather report for SF today\u001b[0m\u001b[33;1m\u001b[1;3m\n",
"\n",
"1. Research current weather conditions in San Francisco\n",
"2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions\n",
"3. Analyze data to determine current weather trends\n",
"4. Write a brief introduction to the weather report\n",
"5. Describe current weather conditions in San Francisco\n",
"6. Discuss any upcoming weather changes\n",
"7. Summarize the weather report\n",
"8. Proofread and edit the report\n",
"9. Submit the report\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. Proofread and edit the report; 9. Submit the report.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. Proofread and edit the report; 9. Submit the report.\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n",
"3: Analyze data to determine any upcoming weather changes;\n",
"4: Research current weather forecasts for San Francisco;\n",
"5: Create a visual representation of the weather report;\n",
"6: Include relevant images and graphics in the report;\n",
"7: Format the report for readability;\n",
"8: Publish the report online;\n",
"9: Monitor the report for accuracy.\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to search for current weather conditions in San Francisco\n",
"Action: Search\n",
"Action Input: Current weather conditions in San Francisco\u001b[0m\u001b[36;1m\u001b[1;3mCurrent Weather for Popular Cities ; San Francisco, CA 46 · Partly Cloudy ; Manhattan, NY warning 52 · Cloudy ; Schiller Park, IL (60176) 40 · Sunny ; Boston, MA 54 ...\u001b[0m\u001b[32;1m\u001b[1;3m I need to compile the data into a weather report\n",
"Action: TODO\n",
"Action Input: Compile data into a weather report\u001b[0m\u001b[33;1m\u001b[1;3m\n",
"\n",
"1. Gather data from reliable sources such as the National Weather Service, local weather stations, and other meteorological organizations.\n",
"\n",
"2. Analyze the data to identify trends and patterns.\n",
"\n",
"3. Create a chart or graph to visualize the data.\n",
"\n",
"4. Write a summary of the data and its implications.\n",
"\n",
"5. Compile the data into a report format.\n",
"\n",
"6. Proofread the report for accuracy and clarity.\n",
"\n",
"7. Publish the report to a website or other platform.\n",
"\n",
"8. Distribute the report to relevant stakeholders.\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. The forecast for the rest of the day is expected to remain partly cloudy.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[93m\u001b[1m\n",
"*****TASK RESULT*****\n",
"\u001b[0m\u001b[0m\n",
"Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. The forecast for the rest of the day is expected to remain partly cloudy.\n",
"\u001b[95m\u001b[1m\n",
"*****TASK LIST*****\n",
"\u001b[0m\u001b[0m\n",
"3: Format the report for readability;\n",
"4: Include relevant images and graphics in the report;\n",
"5: Compare the current weather conditions in San Francisco to the forecasted conditions;\n",
"6: Identify any potential weather-related hazards in the area;\n",
"7: Research historical weather patterns in San Francisco;\n",
"8: Identify any potential trends in the weather data;\n",
"9: Include relevant data sources in the report;\n",
"10: Summarize the weather report in a concise manner;\n",
"11: Include a summary of the forecasted weather conditions;\n",
"12: Include a summary of the current weather conditions;\n",
"13: Include a summary of the historical weather patterns;\n",
"14: Include a summary of the potential weather-related hazards;\n",
"15: Include a summary of the potential trends in the weather data;\n",
"16: Include a summary of the data sources used in the report;\n",
"17: Analyze data to determine any upcoming weather changes;\n",
"18: Research current weather forecasts for San Francisco;\n",
"19: Create a visual representation of the weather report;\n",
"20: Publish the report online;\n",
"21: Monitor the report for accuracy\n",
"\u001b[92m\u001b[1m\n",
"*****NEXT TASK*****\n",
"\u001b[0m\u001b[0m\n",
"3: Format the report for readability;\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to make sure the report is easy to read;\n",
"Action: TODO\n",
"Action Input: Make the report easy to read\u001b[0m\u001b[33;1m\u001b[1;3m\n",
"\n",
"1. Break up the report into sections with clear headings\n",
"2. Use bullet points and numbered lists to organize information\n",
"3. Use short, concise sentences\n",
"4. Use simple language and avoid jargon\n",
"5. Include visuals such as charts, graphs, and diagrams to illustrate points\n",
"6. Use bold and italicized text to emphasize key points\n",
"7. Include a table of contents and page numbers\n",
"8. Use a consistent font and font size throughout the report\n",
"9. Include a summary at the end of the report\n",
"10. Proofread the report for typos and errors\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[93m\u001b[1m\n",
|
||||
"*****TASK RESULT*****\n",
|
||||
"\u001b[0m\u001b[0m\n",
|
||||
"The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\n",
|
||||
"\u001b[91m\u001b[1m\n",
|
||||
"*****TASK ENDING*****\n",
|
||||
"\u001b[0m\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'objective': 'Write a weather report for SF today'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"baby_agi({\"objective\": OBJECTIVE})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "898a210b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,708 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# CAMEL Role-Playing Autonomous Cooperative Agents\n",
|
||||
"\n",
|
||||
"This is a langchain implementation of paper: \"CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society\".\n",
|
||||
"\n",
|
||||
"Overview:\n",
|
||||
"\n",
|
||||
"The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond.\n",
|
||||
"\n",
|
||||
"The original implementation: https://github.com/lightaime/camel\n",
|
||||
"\n",
|
||||
"Project website: https://www.camel-ai.org/\n",
|
||||
"\n",
|
||||
"Arxiv paper: https://arxiv.org/abs/2303.17760\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import LangChain related modules "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain.prompts.chat import (\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain.schema import (\n",
|
||||
" AIMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define a CAMEL agent helper class"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CAMELAgent:\n",
|
||||
" def __init__(\n",
|
||||
" self,\n",
|
||||
" system_message: SystemMessage,\n",
|
||||
" model: ChatOpenAI,\n",
|
||||
" ) -> None:\n",
|
||||
" self.system_message = system_message\n",
|
||||
" self.model = model\n",
|
||||
" self.init_messages()\n",
|
||||
"\n",
|
||||
" def reset(self) -> None:\n",
|
||||
" self.init_messages()\n",
|
||||
" return self.stored_messages\n",
|
||||
"\n",
|
||||
" def init_messages(self) -> None:\n",
|
||||
" self.stored_messages = [self.system_message]\n",
|
||||
"\n",
|
||||
" def update_messages(self, message: BaseMessage) -> List[BaseMessage]:\n",
|
||||
" self.stored_messages.append(message)\n",
|
||||
" return self.stored_messages\n",
|
||||
"\n",
|
||||
" def step(\n",
|
||||
" self,\n",
|
||||
" input_message: HumanMessage,\n",
|
||||
" ) -> AIMessage:\n",
|
||||
" messages = self.update_messages(input_message)\n",
|
||||
"\n",
|
||||
" output_message = self.model.invoke(messages)\n",
|
||||
" self.update_messages(output_message)\n",
|
||||
"\n",
|
||||
" return output_message"
|
||||
]
|
||||
},
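{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check, the helper class can be exercised on its own before wiring up the role-playing loop. A minimal sketch (the message contents here are illustrative, not part of the original notebook):\n",
"\n",
"```python\n",
"agent = CAMELAgent(SystemMessage(content=\"You are a helpful assistant.\"), ChatOpenAI())\n",
"reply = agent.step(HumanMessage(content=\"Say hello in one word.\"))\n",
"print(reply.content)  # step() also appends the AIMessage to agent.stored_messages\n",
"```"
]
},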
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup OpenAI API key and roles and task for role-playing"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
|
||||
"\n",
|
||||
"assistant_role_name = \"Python Programmer\"\n",
|
||||
"user_role_name = \"Stock Trader\"\n",
|
||||
"task = \"Develop a trading bot for the stock market\"\n",
|
||||
"word_limit = 50 # word limit for task brainstorming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a task specify agent for brainstorming and get the specified task"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Specified task: Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"task_specifier_sys_msg = SystemMessage(content=\"You can make a task more specific.\")\n",
|
||||
"task_specifier_prompt = \"\"\"Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.\n",
|
||||
"Please make it more specific. Be creative and imaginative.\n",
|
||||
"Please reply with the specified task in {word_limit} words or less. Do not add anything else.\"\"\"\n",
|
||||
"task_specifier_template = HumanMessagePromptTemplate.from_template(\n",
|
||||
" template=task_specifier_prompt\n",
|
||||
")\n",
|
||||
"task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))\n",
|
||||
"task_specifier_msg = task_specifier_template.format_messages(\n",
|
||||
" assistant_role_name=assistant_role_name,\n",
|
||||
" user_role_name=user_role_name,\n",
|
||||
" task=task,\n",
|
||||
" word_limit=word_limit,\n",
|
||||
")[0]\n",
|
||||
"specified_task_msg = task_specify_agent.step(task_specifier_msg)\n",
|
||||
"print(f\"Specified task: {specified_task_msg.content}\")\n",
|
||||
"specified_task = specified_task_msg.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create inception prompts for AI assistant and AI user for role-playing"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"assistant_inception_prompt = \"\"\"Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!\n",
|
||||
"We share a common interest in collaborating to successfully complete a task.\n",
|
||||
"You must help me to complete the task.\n",
|
||||
"Here is the task: {task}. Never forget our task!\n",
|
||||
"I must instruct you based on your expertise and my needs to complete the task.\n",
|
||||
"\n",
|
||||
"I must give you one instruction at a time.\n",
|
||||
"You must write a specific solution that appropriately completes the requested instruction.\n",
|
||||
"You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.\n",
|
||||
"Do not add anything else other than your solution to my instruction.\n",
|
||||
"You are never supposed to ask me any questions you only answer questions.\n",
|
||||
"You are never supposed to reply with a flake solution. Explain your solutions.\n",
|
||||
"Your solution must be declarative sentences and simple present tense.\n",
|
||||
"Unless I say the task is completed, you should always start with:\n",
|
||||
"\n",
|
||||
"Solution: <YOUR_SOLUTION>\n",
|
||||
"\n",
|
||||
"<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.\n",
|
||||
"Always end <YOUR_SOLUTION> with: Next request.\"\"\"\n",
|
||||
"\n",
|
||||
"user_inception_prompt = \"\"\"Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.\n",
|
||||
"We share a common interest in collaborating to successfully complete a task.\n",
|
||||
"I must help you to complete the task.\n",
|
||||
"Here is the task: {task}. Never forget our task!\n",
|
||||
"You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:\n",
|
||||
"\n",
|
||||
"1. Instruct with a necessary input:\n",
|
||||
"Instruction: <YOUR_INSTRUCTION>\n",
|
||||
"Input: <YOUR_INPUT>\n",
|
||||
"\n",
|
||||
"2. Instruct without any input:\n",
|
||||
"Instruction: <YOUR_INSTRUCTION>\n",
|
||||
"Input: None\n",
|
||||
"\n",
|
||||
"The \"Instruction\" describes a task or question. The paired \"Input\" provides further context or information for the requested \"Instruction\".\n",
|
||||
"\n",
|
||||
"You must give me one instruction at a time.\n",
|
||||
"I must write a response that appropriately completes the requested instruction.\n",
|
||||
"I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.\n",
|
||||
"You should instruct me not ask me questions.\n",
|
||||
"Now you must start to instruct me using the two ways described above.\n",
|
||||
"Do not add anything else other than your instruction and the optional corresponding input!\n",
|
||||
"Keep giving me instructions and necessary inputs until you think the task is completed.\n",
|
||||
"When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.\n",
|
||||
"Never say <CAMEL_TASK_DONE> unless my responses have solved your task.\"\"\""
|
||||
]
|
||||
},
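{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"These prompts define a small wire protocol between the two agents: the user always emits `Instruction:`/`Input:` pairs, and the assistant always replies with `Solution: ...` ending in `Next request.`. A minimal sketch of pulling the fields out of a user turn (illustrative only; the notebook itself just passes the raw strings back and forth):\n",
"\n",
"```python\n",
"import re\n",
"\n",
"def parse_user_turn(text: str) -> tuple:\n",
"    # assumes the turn follows the inception-prompt format above\n",
"    instruction = re.search(r\"Instruction: (.*)\", text).group(1)\n",
"    task_input = re.search(r\"Input: (.*)\", text).group(1)\n",
"    return instruction, task_input\n",
"```"
]
},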
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a helper helper to get system messages for AI assistant and AI user from role names and the task"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):\n",
|
||||
" assistant_sys_template = SystemMessagePromptTemplate.from_template(\n",
|
||||
" template=assistant_inception_prompt\n",
|
||||
" )\n",
|
||||
" assistant_sys_msg = assistant_sys_template.format_messages(\n",
|
||||
" assistant_role_name=assistant_role_name,\n",
|
||||
" user_role_name=user_role_name,\n",
|
||||
" task=task,\n",
|
||||
" )[0]\n",
|
||||
"\n",
|
||||
" user_sys_template = SystemMessagePromptTemplate.from_template(\n",
|
||||
" template=user_inception_prompt\n",
|
||||
" )\n",
|
||||
" user_sys_msg = user_sys_template.format_messages(\n",
|
||||
" assistant_role_name=assistant_role_name,\n",
|
||||
" user_role_name=user_role_name,\n",
|
||||
" task=task,\n",
|
||||
" )[0]\n",
|
||||
"\n",
|
||||
" return assistant_sys_msg, user_sys_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create AI assistant agent and AI user agent from obtained system messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"assistant_sys_msg, user_sys_msg = get_sys_msgs(\n",
|
||||
" assistant_role_name, user_role_name, specified_task\n",
|
||||
")\n",
|
||||
"assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))\n",
|
||||
"user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))\n",
|
||||
"\n",
|
||||
"# Reset agents\n",
|
||||
"assistant_agent.reset()\n",
|
||||
"user_agent.reset()\n",
|
||||
"\n",
|
||||
"# Initialize chats\n",
|
||||
"user_msg = HumanMessage(\n",
|
||||
" content=(\n",
|
||||
" f\"{user_sys_msg.content}. \"\n",
|
||||
" \"Now start to give me introductions one by one. \"\n",
|
||||
" \"Only reply with Instruction and Input.\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assistant_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n",
|
||||
"assistant_msg = assistant_agent.step(user_msg)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Start role-playing session to solve the task!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Original task prompt:\n",
|
||||
"Develop a trading bot for the stock market\n",
|
||||
"\n",
|
||||
"Specified task prompt:\n",
|
||||
"Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Install the necessary Python libraries for data analysis and trading.\n",
|
||||
"Input: None\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can install the necessary Python libraries using pip, a package installer for Python. We can install pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following command to install these libraries:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"pip install pandas numpy matplotlib ta-lib\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Import the necessary libraries in the Python script.\n",
|
||||
"Input: None\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can import the necessary libraries in the Python script using the import statement. We need to import pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following code to import these libraries:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import talib as ta\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Load historical stock data into a pandas DataFrame.\n",
|
||||
"Input: The path to the CSV file containing the historical stock data.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can load historical stock data into a pandas DataFrame using the `read_csv()` function from pandas. We need to pass the path to the CSV file containing the historical stock data as an argument to this function. We can use the following code to load the historical stock data:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"df = pd.read_csv('path/to/csv/file.csv')\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will load the historical stock data into a pandas DataFrame called `df`. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date using the `set_index()` and `sort_index()` functions from pandas. We can use the following code to preprocess the historical stock data:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"df = df.set_index('date')\n",
|
||||
"df = df.sort_index(ascending=True)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will set the date column as the index and sort the DataFrame in ascending order by date. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib.\n",
|
||||
"Input: The period for the short-term moving average and the period for the long-term moving average.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib. We need to pass the period for the short-term moving average and the period for the long-term moving average as arguments to this function. We can use the following code to calculate the short-term and long-term moving averages:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"short_ma = ta.SMA(df['close'], timeperiod=short_period)\n",
|
||||
"long_ma = ta.SMA(df['close'], timeperiod=long_period)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will calculate the short-term and long-term moving averages for the stock data and store them in the `short_ma` and `long_ma` variables, respectively. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages using the `concat()` function from pandas. We need to pass the historical stock data, the short-term moving average, and the long-term moving average as arguments to this function. We can use the following code to create the new DataFrame:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"new_df = pd.concat([df, short_ma, long_ma], axis=1)\n",
|
||||
"new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a new DataFrame called `new_df` that combines the historical stock data with the short-term and long-term moving averages. The columns of the new DataFrame are named 'open', 'high', 'low', 'close', 'volume', 'short_ma', and 'long_ma'. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. We can use the following code to create the new column:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a new column called 'signal' in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. If the short-term moving average is greater than the long-term moving average, the signal is 1 (buy), otherwise the signal is -1 (sell). Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target.\n",
|
||||
"Input: The stop loss and profit target as percentages.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. We need to pass the stop loss and profit target as percentages as arguments to this function. We can use the following code to create the new column:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"stop_loss = stop_loss_percent / 100\n",
|
||||
"profit_target = profit_target_percent / 100\n",
|
||||
"\n",
|
||||
"new_df['pnl'] = 0.0\n",
|
||||
"buy_price = 0.0\n",
|
||||
"for i in range(1, len(new_df)):\n",
|
||||
" if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n",
|
||||
" buy_price = new_df['close'][i]\n",
|
||||
" elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n",
|
||||
" sell_price = new_df['close'][i]\n",
|
||||
" if sell_price <= buy_price * (1 - stop_loss):\n",
|
||||
" new_df['pnl'][i] = -stop_loss\n",
|
||||
" elif sell_price >= buy_price * (1 + profit_target):\n",
|
||||
" new_df['pnl'][i] = profit_target\n",
|
||||
" else:\n",
|
||||
" new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a new column called 'pnl' in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. The stop loss and profit target are calculated based on the stop_loss_percent and profit_target_percent variables, respectively. The buy and sell prices are stored in the buy_price and sell_price variables, respectively. If the sell price is less than or equal to the stop loss, the profit or loss is set to -stop_loss. If the sell price is greater than or equal to the profit target, the profit or loss is set to profit_target. Otherwise, the profit or loss is calculated as (sell_price - buy_price) / buy_price. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Calculate the total profit or loss for all trades.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can calculate the total profit or loss for all trades by summing the values in the 'pnl' column of the DataFrame. We can use the following code to calculate the total profit or loss:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"total_pnl = new_df['pnl'].sum()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will calculate the total profit or loss for all trades and store it in the total_pnl variable. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Visualize the stock data, short-term moving average, and long-term moving average using a line chart.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can visualize the stock data, short-term moving average, and long-term moving average using a line chart using the `plot()` function from pandas. We can use the following code to visualize the data:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n",
|
||||
"plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Stock Data with Moving Averages')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a line chart that visualizes the stock data, short-term moving average, and long-term moving average. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each line. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Visualize the buy and sell signals using a scatter plot.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can visualize the buy and sell signals using a scatter plot using the `scatter()` function from matplotlib. We can use the following code to visualize the signals:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"buy_signals = new_df[new_df['signal'] == 1]\n",
|
||||
"sell_signals = new_df[new_df['signal'] == -1]\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n",
|
||||
"plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Buy and Sell Signals')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will create a scatter plot that visualizes the buy and sell signals. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each point. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Print the total profit or loss for all trades.\n",
|
||||
"Input: None.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: We can print the total profit or loss for all trades using the `print()` function. We can use the following code to print the total profit or loss:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This will print the total profit or loss for all trades in percentage format with two decimal places. Next request.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"Instruction: Finalize the code by putting all the code snippets together in a single Python script.\n",
|
||||
"Input: The necessary inputs for each code snippet.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Solution: Here is the complete Python script that puts all the code snippets together:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import talib as ta\n",
|
||||
"\n",
|
||||
"# Load historical stock data into a pandas DataFrame\n",
|
||||
"df = pd.read_csv('path/to/csv/file.csv')\n",
|
||||
"\n",
|
||||
"# Preprocess the historical stock data\n",
|
||||
"df = df.set_index('date')\n",
|
||||
"df = df.sort_index(ascending=True)\n",
|
||||
"\n",
|
||||
"# Calculate the short-term and long-term moving averages\n",
|
||||
"short_period = 10\n",
|
||||
"long_period = 30\n",
|
||||
"short_ma = ta.SMA(df['close'], timeperiod=short_period)\n",
|
||||
"long_ma = ta.SMA(df['close'], timeperiod=long_period)\n",
|
||||
"\n",
|
||||
"# Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages\n",
|
||||
"new_df = pd.concat([df, short_ma, long_ma], axis=1)\n",
|
||||
"new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n",
|
||||
"\n",
|
||||
"# Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages\n",
|
||||
"new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n",
|
||||
"\n",
|
||||
"# Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target\n",
|
||||
"stop_loss_percent = 5\n",
|
||||
"profit_target_percent = 10\n",
|
||||
"stop_loss = stop_loss_percent / 100\n",
|
||||
"profit_target = profit_target_percent / 100\n",
|
||||
"new_df['pnl'] = 0.0\n",
|
||||
"buy_price = 0.0\n",
|
||||
"for i in range(1, len(new_df)):\n",
|
||||
" if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n",
|
||||
" buy_price = new_df['close'][i]\n",
|
||||
" elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n",
|
||||
" sell_price = new_df['close'][i]\n",
|
||||
" if sell_price <= buy_price * (1 - stop_loss):\n",
|
||||
" new_df['pnl'][i] = -stop_loss\n",
|
||||
" elif sell_price >= buy_price * (1 + profit_target):\n",
|
||||
" new_df['pnl'][i] = profit_target\n",
|
||||
" else:\n",
|
||||
" new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n",
|
||||
"\n",
|
||||
"# Calculate the total profit or loss for all trades\n",
|
||||
"total_pnl = new_df['pnl'].sum()\n",
|
||||
"\n",
|
||||
"# Visualize the stock data, short-term moving average, and long-term moving average using a line chart\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n",
|
||||
"plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Stock Data with Moving Averages')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"# Visualize the buy and sell signals using a scatter plot\n",
|
||||
"buy_signals = new_df[new_df['signal'] == 1]\n",
|
||||
"sell_signals = new_df[new_df['signal'] == -1]\n",
|
||||
"plt.figure(figsize=(12,6))\n",
|
||||
"plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n",
|
||||
"plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n",
|
||||
"plt.plot(new_df.index, new_df['close'], label='Close')\n",
|
||||
"plt.xlabel('Date')\n",
|
||||
"plt.ylabel('Price')\n",
|
||||
"plt.title('Buy and Sell Signals')\n",
|
||||
"plt.legend()\n",
|
||||
"plt.show()\n",
|
||||
"\n",
|
||||
"# Print the total profit or loss for all trades\n",
|
||||
"print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"You need to replace the path/to/csv/file.csv with the actual path to the CSV file containing the historical stock data. You can also adjust the short_period, long_period, stop_loss_percent, and profit_target_percent variables to suit your needs.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI User (Stock Trader):\n",
|
||||
"\n",
|
||||
"<CAMEL_TASK_DONE>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"AI Assistant (Python Programmer):\n",
|
||||
"\n",
|
||||
"Great! Let me know if you need any further assistance.\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(f\"Original task prompt:\\n{task}\\n\")\n",
|
||||
"print(f\"Specified task prompt:\\n{specified_task}\\n\")\n",
|
||||
"\n",
|
||||
"chat_turn_limit, n = 30, 0\n",
|
||||
"while n < chat_turn_limit:\n",
|
||||
" n += 1\n",
|
||||
" user_ai_msg = user_agent.step(assistant_msg)\n",
|
||||
" user_msg = HumanMessage(content=user_ai_msg.content)\n",
|
||||
" print(f\"AI User ({user_role_name}):\\n\\n{user_msg.content}\\n\\n\")\n",
|
||||
"\n",
|
||||
" assistant_ai_msg = assistant_agent.step(user_msg)\n",
|
||||
" assistant_msg = HumanMessage(content=assistant_ai_msg.content)\n",
|
||||
" print(f\"AI Assistant ({assistant_role_name}):\\n\\n{assistant_msg.content}\\n\\n\")\n",
|
||||
" if \"<CAMEL_TASK_DONE>\" in user_msg.content:\n",
|
||||
" break"
|
||||
]
|
||||
}
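,
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"One editorial caveat on the generated script: it writes results via chained indexing (`new_df['pnl'][i] = ...`), which pandas flags with a `SettingWithCopyWarning` and which is not guaranteed to write back to the DataFrame. A minimal sketch of the safer, label-based form (illustrative only, not part of the agents' transcript):\n",
"\n",
"```python\n",
"# write by label instead of chained indexing\n",
"new_df.loc[new_df.index[i], 'pnl'] = (sell_price - buy_price) / buy_price\n",
"```"
]
}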
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "camel",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,557 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup Environment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Python Modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Install the following Python modules:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install ipykernel python-dotenv cassio pandas langchain_openai langchain langchain-community langchainhub langchain_experimental openai-multi-tool-use-parallel-patch\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load the `.env` File"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Connection is via `cassio` using `auto=True` parameter, and the notebook uses OpenAI. You should create a `.env` file accordingly.\n",
|
||||
"\n",
|
||||
"For Cassandra, set:\n",
|
||||
"```bash\n",
|
||||
"CASSANDRA_CONTACT_POINTS\n",
|
||||
"CASSANDRA_USERNAME\n",
|
||||
"CASSANDRA_PASSWORD\n",
|
||||
"CASSANDRA_KEYSPACE\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"For Astra, set:\n",
|
||||
"```bash\n",
|
||||
"ASTRA_DB_APPLICATION_TOKEN\n",
|
||||
"ASTRA_DB_DATABASE_ID\n",
|
||||
"ASTRA_DB_KEYSPACE\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"For example:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"# Connection to Astra:\n",
|
||||
"ASTRA_DB_DATABASE_ID=a1b2c3d4-...\n",
|
||||
"ASTRA_DB_APPLICATION_TOKEN=AstraCS:...\n",
|
||||
"ASTRA_DB_KEYSPACE=notebooks\n",
|
||||
"\n",
|
||||
"# Also set \n",
|
||||
"OPENAI_API_KEY=sk-....\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"(You may also modify the below code to directly connect with `cassio`.)"
|
||||
]
|
||||
},
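{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of such a direct connection, assuming `cassio`'s Astra keyword arguments (`token`, `database_id`, `keyspace`); substitute your own credentials:\n",
"\n",
"```python\n",
"import cassio\n",
"\n",
"cassio.init(\n",
"    token=\"AstraCS:...\",  # assumption: your Astra application token\n",
"    database_id=\"a1b2c3d4-...\",  # assumption: your Astra database ID\n",
"    keyspace=\"notebooks\",\n",
")\n",
"```"
]
},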
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"load_dotenv(override=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Connect to Cassandra"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import cassio\n",
|
||||
"\n",
|
||||
"cassio.init(auto=True)\n",
|
||||
"session = cassio.config.resolve_session()\n",
|
||||
"if not session:\n",
|
||||
" raise Exception(\n",
|
||||
" \"Check environment configuration or manually configure cassio connection parameters\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"keyspace = os.environ.get(\n",
|
||||
" \"ASTRA_DB_KEYSPACE\", os.environ.get(\"CASSANDRA_KEYSPACE\", None)\n",
|
||||
")\n",
|
||||
"if not keyspace:\n",
|
||||
" raise ValueError(\"a KEYSPACE environment variable must be set\")\n",
|
||||
"\n",
|
||||
"session.set_keyspace(keyspace)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup Database"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This needs to be done one time only!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Download Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The dataset used is from Kaggle, the [Environmental Sensor Telemetry Data](https://www.kaggle.com/datasets/garystafford/environmental-sensor-data-132k?select=iot_telemetry_data.csv). The next cell will download and unzip the data into a Pandas dataframe. The following cell is instructions to download manually. \n",
|
||||
"\n",
|
||||
"The net result of this section is you should have a Pandas dataframe variable `df`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Download Automatically"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from io import BytesIO\n",
|
||||
"from zipfile import ZipFile\n",
|
||||
"\n",
|
||||
"import pandas as pd\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"datasetURL = \"https://storage.googleapis.com/kaggle-data-sets/788816/1355729/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20240404%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20240404T115828Z&X-Goog-Expires=259200&X-Goog-SignedHeaders=host&X-Goog-Signature=2849f003b100eb9dcda8dd8535990f51244292f67e4f5fad36f14aa67f2d4297672d8fe6ff5a39f03a29cda051e33e95d36daab5892b8874dcd5a60228df0361fa26bae491dd4371f02dd20306b583a44ba85a4474376188b1f84765147d3b4f05c57345e5de883c2c29653cce1f3755cd8e645c5e952f4fb1c8a735b22f0c811f97f7bce8d0235d0d3731ca8ab4629ff381f3bae9e35fc1b181c1e69a9c7913a5e42d9d52d53e5f716467205af9c8a3cc6746fc5352e8fbc47cd7d18543626bd67996d18c2045c1e475fc136df83df352fa747f1a3bb73e6ba3985840792ec1de407c15836640ec96db111b173bf16115037d53fdfbfd8ac44145d7f9a546aa\"\n",
|
||||
"\n",
|
||||
"response = requests.get(datasetURL)\n",
|
||||
"if response.status_code == 200:\n",
|
||||
" zip_file = ZipFile(BytesIO(response.content))\n",
|
||||
" csv_file_name = zip_file.namelist()[0]\n",
|
||||
"else:\n",
|
||||
" print(\"Failed to download the file\")\n",
|
||||
"\n",
|
||||
"with zip_file.open(csv_file_name) as csv_file:\n",
|
||||
" df = pd.read_csv(csv_file)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Download Manually"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can download the `.zip` file and unpack the `.csv` contained within. Comment in the next line, and adjust the path to this `.csv` file appropriately."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# df = pd.read_csv(\"/path/to/iot_telemetry_data.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Data into Cassandra"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This section assumes the existence of a dataframe `df`, the following cell validates its structure. The Download section above creates this object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"assert df is not None, \"Dataframe 'df' must be set\"\n",
|
||||
"expected_columns = [\n",
|
||||
" \"ts\",\n",
|
||||
" \"device\",\n",
|
||||
" \"co\",\n",
|
||||
" \"humidity\",\n",
|
||||
" \"light\",\n",
|
||||
" \"lpg\",\n",
|
||||
" \"motion\",\n",
|
||||
" \"smoke\",\n",
|
||||
" \"temp\",\n",
|
||||
"]\n",
|
||||
"assert all([column in df.columns for column in expected_columns]), (\n",
|
||||
" \"DataFrame does not have the expected columns\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create and load tables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import UTC, datetime\n",
|
||||
"\n",
|
||||
"from cassandra.query import BatchStatement\n",
|
||||
"\n",
|
||||
"# Create sensors table\n",
|
||||
"table_query = \"\"\"\n",
|
||||
"CREATE TABLE IF NOT EXISTS iot_sensors (\n",
|
||||
" device text,\n",
|
||||
" conditions text,\n",
|
||||
" room text,\n",
|
||||
" PRIMARY KEY (device)\n",
|
||||
")\n",
|
||||
"WITH COMMENT = 'Environmental IoT room sensor metadata.';\n",
|
||||
"\"\"\"\n",
|
||||
"session.execute(table_query)\n",
|
||||
"\n",
|
||||
"pstmt = session.prepare(\n",
|
||||
" \"\"\"\n",
|
||||
"INSERT INTO iot_sensors (device, conditions, room)\n",
|
||||
"VALUES (?, ?, ?)\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"devices = [\n",
|
||||
" (\"00:0f:00:70:91:0a\", \"stable conditions, cooler and more humid\", \"room 1\"),\n",
|
||||
" (\"1c:bf:ce:15:ec:4d\", \"highly variable temperature and humidity\", \"room 2\"),\n",
|
||||
" (\"b8:27:eb:bf:9d:51\", \"stable conditions, warmer and dryer\", \"room 3\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"for device, conditions, room in devices:\n",
|
||||
" session.execute(pstmt, (device, conditions, room))\n",
|
||||
"\n",
|
||||
"print(\"Sensors inserted successfully.\")\n",
|
||||
"\n",
|
||||
"# Create data table\n",
|
||||
"table_query = \"\"\"\n",
|
||||
"CREATE TABLE IF NOT EXISTS iot_data (\n",
|
||||
" day text,\n",
|
||||
" device text,\n",
|
||||
" ts timestamp,\n",
|
||||
" co double,\n",
|
||||
" humidity double,\n",
|
||||
" light boolean,\n",
|
||||
" lpg double,\n",
|
||||
" motion boolean,\n",
|
||||
" smoke double,\n",
|
||||
" temp double,\n",
|
||||
" PRIMARY KEY ((day, device), ts)\n",
|
||||
")\n",
|
||||
"WITH COMMENT = 'Data from environmental IoT room sensors. Columns include device identifier, timestamp (ts) of the data collection, carbon monoxide level (co), relative humidity, light presence, LPG concentration, motion detection, smoke concentration, and temperature (temp). Data is partitioned by day and device.';\n",
|
||||
"\"\"\"\n",
|
||||
"session.execute(table_query)\n",
|
||||
"\n",
|
||||
"pstmt = session.prepare(\n",
|
||||
" \"\"\"\n",
|
||||
"INSERT INTO iot_data (day, device, ts, co, humidity, light, lpg, motion, smoke, temp)\n",
|
||||
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def insert_data_batch(name, group):\n",
|
||||
" batch = BatchStatement()\n",
|
||||
" day, device = name\n",
|
||||
" print(f\"Inserting batch for day: {day}, device: {device}\")\n",
|
||||
"\n",
|
||||
" for _, row in group.iterrows():\n",
|
||||
" timestamp = datetime.fromtimestamp(row[\"ts\"], UTC)\n",
|
||||
" batch.add(\n",
|
||||
" pstmt,\n",
|
||||
" (\n",
|
||||
" day,\n",
|
||||
" row[\"device\"],\n",
|
||||
" timestamp,\n",
|
||||
" row[\"co\"],\n",
|
||||
" row[\"humidity\"],\n",
|
||||
" row[\"light\"],\n",
|
||||
" row[\"lpg\"],\n",
|
||||
" row[\"motion\"],\n",
|
||||
" row[\"smoke\"],\n",
|
||||
" row[\"temp\"],\n",
|
||||
" ),\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" session.execute(batch)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Convert columns to appropriate types\n",
|
||||
"df[\"light\"] = df[\"light\"] == \"true\"\n",
|
||||
"df[\"motion\"] = df[\"motion\"] == \"true\"\n",
|
||||
"df[\"ts\"] = df[\"ts\"].astype(float)\n",
|
||||
"df[\"day\"] = df[\"ts\"].apply(\n",
|
||||
" lambda x: datetime.fromtimestamp(x, UTC).strftime(\"%Y-%m-%d\")\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"grouped_df = df.groupby([\"day\", \"device\"])\n",
|
||||
"\n",
|
||||
"for name, group in grouped_df:\n",
|
||||
" insert_data_batch(name, group)\n",
|
||||
"\n",
|
||||
"print(\"Data load complete\")"
|
||||
]
|
||||
},
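{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note the design choice above: each `BatchStatement` covers a single `(day, device)` group, which matches the table's partition key `((day, device), ts)`. Keeping a batch within one partition is the idiomatic way to use Cassandra batches; cross-partition batches can trigger server warnings and put extra load on the coordinator."
]
},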
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(session.keyspace)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the Tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Python `import` statements for the demo:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor, create_openai_tools_agent\n",
|
||||
"from langchain_community.agent_toolkits.cassandra_database.toolkit import (\n",
|
||||
" CassandraDatabaseToolkit,\n",
|
||||
")\n",
|
||||
"from langchain_community.tools.cassandra_database.prompt import QUERY_PATH_PROMPT\n",
|
||||
"from langchain_community.tools.cassandra_database.tool import (\n",
|
||||
" GetSchemaCassandraDatabaseTool,\n",
|
||||
" GetTableDataCassandraDatabaseTool,\n",
|
||||
" QueryCassandraDatabaseTool,\n",
|
||||
")\n",
|
||||
"from langchain_community.utilities.cassandra_database import CassandraDatabase\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The `CassandraDatabase` object is loaded from `cassio`, though it does accept a `Session`-type parameter as an alternative."
|
||||
]
|
||||
},
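{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the session-based alternative (the parameter name is an assumption; check the `CassandraDatabase` signature in your installed version):\n",
"\n",
"```python\n",
"# db = CassandraDatabase(session=session, include_tables=[\"iot_sensors\", \"iot_data\"])\n",
"```"
]
},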
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a CassandraDatabase instance\n",
|
||||
"db = CassandraDatabase(include_tables=[\"iot_sensors\", \"iot_data\"])\n",
|
||||
"\n",
|
||||
"# Create the Cassandra Database tools\n",
|
||||
"query_tool = QueryCassandraDatabaseTool(db=db)\n",
|
||||
"schema_tool = GetSchemaCassandraDatabaseTool(db=db)\n",
|
||||
"select_data_tool = GetTableDataCassandraDatabaseTool(db=db)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The tools can be invoked directly:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Test the tools\n",
|
||||
"print(\"Executing a CQL query:\")\n",
|
||||
"query = \"SELECT * FROM iot_sensors LIMIT 5;\"\n",
|
||||
"result = query_tool.run({\"query\": query})\n",
|
||||
"print(result)\n",
|
||||
"\n",
|
||||
"print(\"\\nGetting the schema for a keyspace:\")\n",
|
||||
"schema = schema_tool.run({\"keyspace\": keyspace})\n",
|
||||
"print(schema)\n",
|
||||
"\n",
|
||||
"print(\"\\nGetting data from a table:\")\n",
|
||||
"table = \"iot_data\"\n",
|
||||
"predicate = \"day = '2020-07-14' and device = 'b8:27:eb:bf:9d:51'\"\n",
|
||||
"data = select_data_tool.run(\n",
|
||||
" {\"keyspace\": keyspace, \"table\": table, \"predicate\": predicate, \"limit\": 5}\n",
|
||||
")\n",
|
||||
"print(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Agent Configuration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import Tool\n",
|
||||
"from langchain_experimental.utilities import PythonREPL\n",
|
||||
"\n",
|
||||
"python_repl = PythonREPL()\n",
|
||||
"\n",
|
||||
"repl_tool = Tool(\n",
|
||||
" name=\"python_repl\",\n",
|
||||
" description=\"A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.\",\n",
|
||||
" func=python_repl.run,\n",
|
||||
")"
|
||||
]
|
||||
},
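{
"cell_type": "markdown",
"metadata": {},
"source": [
"The REPL tool can be tried directly before handing it to the agent. A minimal sketch (the expected output shown in the comment assumes `PythonREPL` returns captured stdout):\n",
"\n",
"```python\n",
"print(repl_tool.run(\"print(1 + 1)\"))  # expected: '2\\n'\n",
"```"
]
},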
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-4-1106-preview\")\n",
|
||||
"toolkit = CassandraDatabaseToolkit(db=db)\n",
|
||||
"\n",
|
||||
"# context = toolkit.get_context()\n",
|
||||
"# tools = toolkit.get_tools()\n",
|
||||
"tools = [schema_tool, select_data_tool, repl_tool]\n",
|
||||
"\n",
|
||||
"input = (\n",
|
||||
" QUERY_PATH_PROMPT\n",
|
||||
" + f\"\"\"\n",
|
||||
"\n",
|
||||
"Here is your task: In the {keyspace} keyspace, find the total number of times the temperature of each device has exceeded 23 degrees on July 14, 2020.\n",
|
||||
" Create a summary report including the name of the room. Use Pandas if helpful.\n",
|
||||
"\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = hub.pull(\"hwchase17/openai-tools-agent\")\n",
|
||||
"\n",
|
||||
"# messages = [\n",
|
||||
"# HumanMessagePromptTemplate.from_template(input),\n",
|
||||
"# AIMessage(content=QUERY_PATH_PROMPT),\n",
|
||||
"# MessagesPlaceholder(variable_name=\"agent_scratchpad\"),\n",
|
||||
"# ]\n",
|
||||
"\n",
|
||||
"# prompt = ChatPromptTemplate.from_messages(messages)\n",
|
||||
"# print(prompt)\n",
|
||||
"\n",
|
||||
"# Choose the LLM that will drive the agent\n",
|
||||
"# Only certain models support this\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)\n",
|
||||
"\n",
|
||||
"# Construct the OpenAI Tools agent\n",
|
||||
"agent = create_openai_tools_agent(llm, tools, prompt)\n",
|
||||
"\n",
|
||||
"print(\"Available tools:\")\n",
|
||||
"for tool in tools:\n",
|
||||
" print(\"\\t\" + tool.name + \" - \" + tool.description + \" - \" + str(tool))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\n",
|
||||
"\n",
|
||||
"response = agent_executor.invoke({\"input\": input})\n",
|
||||
"\n",
|
||||
"print(response[\"output\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,554 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ba5f8741",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Custom Agent with PlugIn Retrieval\n",
|
||||
"\n",
|
||||
"This notebook combines two concepts in order to build a custom agent that can interact with AI Plugins:\n",
|
||||
"\n",
|
||||
"1. [Custom Agent with Tool Retrieval](/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.html): This introduces the concept of retrieving many tools, which is useful when trying to work with arbitrarily many plugins.\n",
|
||||
"2. [Natural Language API Chains](/docs/use_cases/apis/openapi.html): This creates Natural Language wrappers around OpenAPI endpoints. This is useful because (1) plugins use OpenAPI endpoints under the hood, (2) wrapping them in an NLAChain allows the router agent to call it more easily.\n",
|
||||
"\n",
|
||||
"The novel idea introduced in this notebook is the idea of using retrieval to select not the tools explicitly, but the set of OpenAPI specs to use. We can then generate tools from those OpenAPI specs. The use case for this is when trying to get agents to use plugins. It may be more efficient to choose plugins first, then the endpoints, rather than the endpoints directly. This is because the plugins may contain more useful information for selection."
|
||||
]
|
||||
},
|
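A minimal sketch of the two-stage selection this notebook builds toward; `plugin_retriever` and `toolkit_for` are illustrative placeholders, not the notebook's actual API:

```python
# Hypothetical sketch: choose plugins first, then expand each into endpoint tools.
def select_tools(query, plugin_retriever, toolkit_for):
    plugin_docs = plugin_retriever.invoke(query)  # stage 1: pick plugins by description
    tools = []
    for doc in plugin_docs:  # stage 2: one natural-language tool per endpoint
        tools.extend(toolkit_for(doc).nla_tools)
    return tools
```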
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fea4812c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up environment\n",
|
||||
"\n",
|
||||
"Do necessary imports, etc."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "9af9734e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"from typing import Union\n",
|
||||
"\n",
|
||||
"from langchain.agents import (\n",
|
||||
" AgentExecutor,\n",
|
||||
" AgentOutputParser,\n",
|
||||
" LLMSingleActionAgent,\n",
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain_community.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain_community.tools.plugin import AIPlugin\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2f91d8b4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup LLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a1a3b59c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6df0253f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up plugins\n",
|
||||
"\n",
|
||||
"Load and index plugins"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "becda2a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"urls = [\n",
|
||||
" \"https://datasette.io/.well-known/ai-plugin.json\",\n",
|
||||
" \"https://api.speak.com/.well-known/ai-plugin.json\",\n",
|
||||
" \"https://www.wolframalpha.com/.well-known/ai-plugin.json\",\n",
|
||||
" \"https://www.zapier.com/.well-known/ai-plugin.json\",\n",
|
||||
" \"https://www.klarna.com/.well-known/ai-plugin.json\",\n",
|
||||
" \"https://www.joinmilo.com/.well-known/ai-plugin.json\",\n",
|
||||
" \"https://slack.com/.well-known/ai-plugin.json\",\n",
|
||||
" \"https://schooldigger.com/.well-known/ai-plugin.json\",\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"AI_PLUGINS = [AIPlugin.from_url(url) for url in urls]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17362717",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool Retriever\n",
|
||||
"\n",
|
||||
"We will use a vectorstore to create embeddings for each tool description. Then, for an incoming query we can create embeddings for that query and do a similarity search for relevant tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "77c4be4b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9092a158",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.2 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load a Swagger 2.0 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=plugin.description_for_model,\n",
|
||||
" metadata={\"plugin_name\": plugin.name_for_model},\n",
|
||||
" )\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"]\n",
|
||||
"vector_store = FAISS.from_documents(docs, embeddings)\n",
|
||||
"toolkits_dict = {\n",
|
||||
" plugin.name_for_model: NLAToolkit.from_llm_and_ai_plugin(llm, plugin)\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "735a7566",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" # Get the toolkits, one for each plugin\n",
|
||||
" tool_kits = [toolkits_dict[d.metadata[\"plugin_name\"]] for d in docs]\n",
|
||||
" # Get the tools: a separate NLAChain for each endpoint\n",
|
||||
" tools = []\n",
|
||||
" for tk in tool_kits:\n",
|
||||
" tools.extend(tk.nla_tools)\n",
|
||||
" return tools"
|
||||
]
|
||||
},
|
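The retriever defaults to returning the top four matches; if that pulls in more plugins (and hence more endpoint tools) than you want, you can narrow it. A small sketch, assuming the standard `as_retriever` keyword:

```python
# Retrieve only the 2 most similar plugin descriptions per query
retriever = vector_store.as_retriever(search_kwargs={"k": 2})
```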
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7699afd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now test this retriever to see if it seems to work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "425f2886",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Milo.askMilo',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',\n",
|
||||
" 'SchoolDigger_API_V2.0.Autocomplete_GetSchools',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetDistrict2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetRank_District',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetAllSchools20',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetSchool20',\n",
|
||||
" 'Speak.translate',\n",
|
||||
" 'Speak.explainPhrase',\n",
|
||||
" 'Speak.explainTask']"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = get_tools(\"What could I do today with my kiddo\")\n",
|
||||
"[t.name for t in tools]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "3aa88768",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Open_AI_Klarna_product_Api.productsUsingGET',\n",
|
||||
" 'Milo.askMilo',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',\n",
|
||||
" 'SchoolDigger_API_V2.0.Autocomplete_GetSchools',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetDistrict2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetRank_District',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetAllSchools20',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetSchool20']"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = get_tools(\"what shirts can i buy?\")\n",
|
||||
"[t.name for t in tools]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e7a075c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prompt Template\n",
|
||||
"\n",
|
||||
"The prompt template is pretty standard, because we're not actually changing that much logic in the actual prompt template, but rather we are just changing how retrieval is done."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "339b1bb8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set up the base template\n",
|
||||
"template = \"\"\"Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:\n",
|
||||
"\n",
|
||||
"{tools}\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: the input question you must answer\n",
|
||||
"Thought: you should always think about what to do\n",
|
||||
"Action: the action to take, should be one of [{tool_names}]\n",
|
||||
"Action Input: the input to the action\n",
|
||||
"Observation: the result of the action\n",
|
||||
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
|
||||
"Thought: I now know the final answer\n",
|
||||
"Final Answer: the final answer to the original input question\n",
|
||||
"\n",
|
||||
"Begin! Remember to speak as a pirate when giving your final answer. Use lots of \"Arg\"s\n",
|
||||
"\n",
|
||||
"Question: {input}\n",
|
||||
"{agent_scratchpad}\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1583acdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The custom prompt template now has the concept of a tools_getter, which we call on the input to select the tools to use"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "fd969d31",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Set up a prompt template\n",
|
||||
"class CustomPromptTemplate(StringPromptTemplate):\n",
|
||||
" # The template to use\n",
|
||||
" template: str\n",
|
||||
" ############## NEW ######################\n",
|
||||
" # The list of tools available\n",
|
||||
" tools_getter: Callable\n",
|
||||
"\n",
|
||||
" def format(self, **kwargs) -> str:\n",
|
||||
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
|
||||
" # Format them in a particular way\n",
|
||||
" intermediate_steps = kwargs.pop(\"intermediate_steps\")\n",
|
||||
" thoughts = \"\"\n",
|
||||
" for action, observation in intermediate_steps:\n",
|
||||
" thoughts += action.log\n",
|
||||
" thoughts += f\"\\nObservation: {observation}\\nThought: \"\n",
|
||||
" # Set the agent_scratchpad variable to that value\n",
|
||||
" kwargs[\"agent_scratchpad\"] = thoughts\n",
|
||||
" ############## NEW ######################\n",
|
||||
" tools = self.tools_getter(kwargs[\"input\"])\n",
|
||||
" # Create a tools variable from the list of tools provided\n",
|
||||
" kwargs[\"tools\"] = \"\\n\".join(\n",
|
||||
" [f\"{tool.name}: {tool.description}\" for tool in tools]\n",
|
||||
" )\n",
|
||||
" # Create a list of tool names for the tools provided\n",
|
||||
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in tools])\n",
|
||||
" return self.template.format(**kwargs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "798ef9fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = CustomPromptTemplate(\n",
|
||||
" template=template,\n",
|
||||
" tools_getter=get_tools,\n",
|
||||
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
|
||||
" # This includes the `intermediate_steps` variable because that is needed\n",
|
||||
" input_variables=[\"input\", \"intermediate_steps\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
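As a quick illustration (a sketch, not from the original notebook), formatting the prompt with an empty scratchpad shows the per-query tool list that `tools_getter` injects:

```python
# tools and tool_names are filled in dynamically from the retrieved tools
print(prompt.format(input="what shirts can i buy?", intermediate_steps=[]))
```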
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ef3a1af3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output Parser\n",
|
||||
"\n",
|
||||
"The output parser is unchanged from the previous notebook, since we are not changing anything about the output format."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "7c6fe0d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CustomOutputParser(AgentOutputParser):\n",
|
||||
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
|
||||
" # Check if agent should finish\n",
|
||||
" if \"Final Answer:\" in llm_output:\n",
|
||||
" return AgentFinish(\n",
|
||||
" # Return values is generally always a dictionary with a single `output` key\n",
|
||||
" # It is not recommended to try anything else at the moment :)\n",
|
||||
" return_values={\"output\": llm_output.split(\"Final Answer:\")[-1].strip()},\n",
|
||||
" log=llm_output,\n",
|
||||
" )\n",
|
||||
" # Parse out the action and action input\n",
|
||||
" regex = r\"Action\\s*\\d*\\s*:(.*?)\\nAction\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)\"\n",
|
||||
" match = re.search(regex, llm_output, re.DOTALL)\n",
|
||||
" if not match:\n",
|
||||
" raise ValueError(f\"Could not parse LLM output: `{llm_output}`\")\n",
|
||||
" action = match.group(1).strip()\n",
|
||||
" action_input = match.group(2)\n",
|
||||
" # Return the action and action input\n",
|
||||
" return AgentAction(\n",
|
||||
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
|
||||
" )"
|
||||
]
|
||||
},
|
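A worked example of what the regex accepts (for illustration only): a completion with an action block parses into an `AgentAction`, while one containing `Final Answer:` becomes an `AgentFinish`.

```python
parser = CustomOutputParser()
step = parser.parse("Thought: I need a product API\nAction: Search\nAction Input: shirts")
print(step.tool, step.tool_input)  # -> Search shirts
```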
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "d278706a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output_parser = CustomOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "170587b1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up LLM, stop sequence, and the agent\n",
|
||||
"\n",
|
||||
"Also the same as the previous notebook"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "f9d4c374",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "9b1cc2a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# LLM chain consisting of the LLM and a prompt\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "e4f5092f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool_names = [tool.name for tool in tools]\n",
|
||||
"agent = LLMSingleActionAgent(\n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" output_parser=output_parser,\n",
|
||||
" stop=[\"\\nObservation:\"],\n",
|
||||
" allowed_tools=tool_names,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aa8a5326",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use the Agent\n",
|
||||
"\n",
|
||||
"Now we can use it!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "490604e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
|
||||
" agent=agent, tools=tools, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "653b1617",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to find a product API\n",
|
||||
"Action: Open_AI_Klarna_product_Api.productsUsingGET\n",
|
||||
"Action Input: shirts\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation:\u001b[36;1m\u001b[1;3mI found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.\u001b[0m\u001b[32;1m\u001b[1;3m I now know what shirts I can buy\n",
|
||||
"Final Answer: Arg, I found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Arg, I found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.run(\"what shirts can i buy?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2481ee76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "18784188d7ecd866c0586ac068b02361a6896dc3a29b64f5cc957f09c590acef"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,578 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ba5f8741",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Plug-and-Plai\n",
|
||||
"\n",
|
||||
"This notebook builds upon the idea of [plugin retrieval](./custom_agent_with_plugin_retrieval.html), but pulls all tools from `plugnplai` - a directory of AI Plugins."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fea4812c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up environment\n",
|
||||
"\n",
|
||||
"Do necessary imports, etc."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aca08be8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Install plugnplai lib to get a list of active plugins from https://plugplai.com directory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "52e248c9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.1\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pip install plugnplai -q"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "9af9734e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"from typing import Union\n",
|
||||
"\n",
|
||||
"import plugnplai\n",
|
||||
"from langchain.agents import (\n",
|
||||
" AgentExecutor,\n",
|
||||
" AgentOutputParser,\n",
|
||||
" LLMSingleActionAgent,\n",
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain_community.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain_community.tools.plugin import AIPlugin\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2f91d8b4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup LLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "a1a3b59c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6df0253f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up plugins\n",
|
||||
"\n",
|
||||
"Load and index plugins"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "9e0f7882",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get all plugins from plugnplai.com\n",
|
||||
"urls = plugnplai.get_plugins()\n",
|
||||
"\n",
|
||||
"# Get ChatGPT plugins - only ChatGPT verified plugins\n",
|
||||
"urls = plugnplai.get_plugins(filter=\"ChatGPT\")\n",
|
||||
"\n",
|
||||
"# Get working plugins - only tested plugins (in progress)\n",
|
||||
"urls = plugnplai.get_plugins(filter=\"working\")\n",
|
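||||
"# Note: each call above reassigns urls, so only the last filter (\"working\") is used\n",
|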
||||
"\n",
|
||||
"\n",
|
||||
"AI_PLUGINS = [AIPlugin.from_url(url + \"/.well-known/ai-plugin.json\") for url in urls]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17362717",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool Retriever\n",
|
||||
"\n",
|
||||
"We will use a vectorstore to create embeddings for each tool description. Then, for an incoming query we can create embeddings for that query and do a similarity search for relevant tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "77c4be4b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9092a158",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.2 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n",
|
||||
"Attempting to load a Swagger 2.0 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=plugin.description_for_model,\n",
|
||||
" metadata={\"plugin_name\": plugin.name_for_model},\n",
|
||||
" )\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"]\n",
|
||||
"vector_store = FAISS.from_documents(docs, embeddings)\n",
|
||||
"toolkits_dict = {\n",
|
||||
" plugin.name_for_model: NLAToolkit.from_llm_and_ai_plugin(llm, plugin)\n",
|
||||
" for plugin in AI_PLUGINS\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "735a7566",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" # Get documents, which contain the Plugins to use\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" # Get the toolkits, one for each plugin\n",
|
||||
" tool_kits = [toolkits_dict[d.metadata[\"plugin_name\"]] for d in docs]\n",
|
||||
" # Get the tools: a separate NLAChain for each endpoint\n",
|
||||
" tools = []\n",
|
||||
" for tk in tool_kits:\n",
|
||||
" tools.extend(tk.nla_tools)\n",
|
||||
" return tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7699afd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now test this retriever to see if it seems to work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "425f2886",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Milo.askMilo',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',\n",
|
||||
" 'SchoolDigger_API_V2.0.Autocomplete_GetSchools',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetDistrict2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetRank_District',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetAllSchools20',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetSchool20',\n",
|
||||
" 'Speak.translate',\n",
|
||||
" 'Speak.explainPhrase',\n",
|
||||
" 'Speak.explainTask']"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = get_tools(\"What could I do today with my kiddo\")\n",
|
||||
"[t.name for t in tools]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "3aa88768",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Open_AI_Klarna_product_Api.productsUsingGET',\n",
|
||||
" 'Milo.askMilo',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',\n",
|
||||
" 'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',\n",
|
||||
" 'SchoolDigger_API_V2.0.Autocomplete_GetSchools',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Districts_GetDistrict2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',\n",
|
||||
" 'SchoolDigger_API_V2.0.Rankings_GetRank_District',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetAllSchools20',\n",
|
||||
" 'SchoolDigger_API_V2.0.Schools_GetSchool20']"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = get_tools(\"what shirts can i buy?\")\n",
|
||||
"[t.name for t in tools]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e7a075c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prompt Template\n",
|
||||
"\n",
|
||||
"The prompt template is pretty standard, because we're not actually changing that much logic in the actual prompt template, but rather we are just changing how retrieval is done."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "339b1bb8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set up the base template\n",
|
||||
"template = \"\"\"Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:\n",
|
||||
"\n",
|
||||
"{tools}\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: the input question you must answer\n",
|
||||
"Thought: you should always think about what to do\n",
|
||||
"Action: the action to take, should be one of [{tool_names}]\n",
|
||||
"Action Input: the input to the action\n",
|
||||
"Observation: the result of the action\n",
|
||||
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
|
||||
"Thought: I now know the final answer\n",
|
||||
"Final Answer: the final answer to the original input question\n",
|
||||
"\n",
|
||||
"Begin! Remember to speak as a pirate when giving your final answer. Use lots of \"Arg\"s\n",
|
||||
"\n",
|
||||
"Question: {input}\n",
|
||||
"{agent_scratchpad}\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1583acdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The custom prompt template now has the concept of a tools_getter, which we call on the input to select the tools to use"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "fd969d31",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Set up a prompt template\n",
|
||||
"class CustomPromptTemplate(StringPromptTemplate):\n",
|
||||
" # The template to use\n",
|
||||
" template: str\n",
|
||||
" ############## NEW ######################\n",
|
||||
" # The list of tools available\n",
|
||||
" tools_getter: Callable\n",
|
||||
"\n",
|
||||
" def format(self, **kwargs) -> str:\n",
|
||||
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
|
||||
" # Format them in a particular way\n",
|
||||
" intermediate_steps = kwargs.pop(\"intermediate_steps\")\n",
|
||||
" thoughts = \"\"\n",
|
||||
" for action, observation in intermediate_steps:\n",
|
||||
" thoughts += action.log\n",
|
||||
" thoughts += f\"\\nObservation: {observation}\\nThought: \"\n",
|
||||
" # Set the agent_scratchpad variable to that value\n",
|
||||
" kwargs[\"agent_scratchpad\"] = thoughts\n",
|
||||
" ############## NEW ######################\n",
|
||||
" tools = self.tools_getter(kwargs[\"input\"])\n",
|
||||
" # Create a tools variable from the list of tools provided\n",
|
||||
" kwargs[\"tools\"] = \"\\n\".join(\n",
|
||||
" [f\"{tool.name}: {tool.description}\" for tool in tools]\n",
|
||||
" )\n",
|
||||
" # Create a list of tool names for the tools provided\n",
|
||||
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in tools])\n",
|
||||
" return self.template.format(**kwargs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "798ef9fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = CustomPromptTemplate(\n",
|
||||
" template=template,\n",
|
||||
" tools_getter=get_tools,\n",
|
||||
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
|
||||
" # This includes the `intermediate_steps` variable because that is needed\n",
|
||||
" input_variables=[\"input\", \"intermediate_steps\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ef3a1af3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output Parser\n",
|
||||
"\n",
|
||||
"The output parser is unchanged from the previous notebook, since we are not changing anything about the output format."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "7c6fe0d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CustomOutputParser(AgentOutputParser):\n",
|
||||
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
|
||||
" # Check if agent should finish\n",
|
||||
" if \"Final Answer:\" in llm_output:\n",
|
||||
" return AgentFinish(\n",
|
||||
" # Return values is generally always a dictionary with a single `output` key\n",
|
||||
" # It is not recommended to try anything else at the moment :)\n",
|
||||
" return_values={\"output\": llm_output.split(\"Final Answer:\")[-1].strip()},\n",
|
||||
" log=llm_output,\n",
|
||||
" )\n",
|
||||
" # Parse out the action and action input\n",
|
||||
" regex = r\"Action\\s*\\d*\\s*:(.*?)\\nAction\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)\"\n",
|
||||
" match = re.search(regex, llm_output, re.DOTALL)\n",
|
||||
" if not match:\n",
|
||||
" raise ValueError(f\"Could not parse LLM output: `{llm_output}`\")\n",
|
||||
" action = match.group(1).strip()\n",
|
||||
" action_input = match.group(2)\n",
|
||||
" # Return the action and action input\n",
|
||||
" return AgentAction(\n",
|
||||
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "d278706a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output_parser = CustomOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "170587b1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up LLM, stop sequence, and the agent\n",
|
||||
"\n",
|
||||
"Also the same as the previous notebook"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "f9d4c374",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "9b1cc2a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# LLM chain consisting of the LLM and a prompt\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "e4f5092f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool_names = [tool.name for tool in tools]\n",
|
||||
"agent = LLMSingleActionAgent(\n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" output_parser=output_parser,\n",
|
||||
" stop=[\"\\nObservation:\"],\n",
|
||||
" allowed_tools=tool_names,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aa8a5326",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use the Agent\n",
|
||||
"\n",
|
||||
"Now we can use it!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "490604e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
|
||||
" agent=agent, tools=tools, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "653b1617",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to find a product API\n",
|
||||
"Action: Open_AI_Klarna_product_Api.productsUsingGET\n",
|
||||
"Action Input: shirts\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation:\u001b[36;1m\u001b[1;3mI found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.\u001b[0m\u001b[32;1m\u001b[1;3m I now know what shirts I can buy\n",
|
||||
"Final Answer: Arg, I found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Arg, I found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.run(\"what shirts can i buy?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2481ee76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "3ccef4e08d87aa1eeb90f63e0f071292ccb2e9c42e70f74ab2bf6f5493ca7bbc"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,500 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ba5f8741",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Custom agent with tool retrieval\n",
|
||||
"\n",
|
||||
"The novel idea introduced in this notebook is the idea of using retrieval to select the set of tools to use to answer an agent query. This is useful when you have many many tools to select from. You cannot put the description of all the tools in the prompt (because of context length issues) so instead you dynamically select the N tools you do want to consider using at run time.\n",
|
||||
"\n",
|
||||
"In this notebook we will create a somewhat contrived example. We will have one legitimate tool (search) and then 99 fake tools which are just nonsense. We will then add a step in the prompt template that takes the user input and retrieves tool relevant to the query."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fea4812c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up environment\n",
|
||||
"\n",
|
||||
"Do necessary imports, etc."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "9af9734e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"from typing import Union\n",
|
||||
"\n",
|
||||
"from langchain.agents import (\n",
|
||||
" AgentExecutor,\n",
|
||||
" AgentOutputParser,\n",
|
||||
" LLMSingleActionAgent,\n",
|
||||
" Tool,\n",
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain_community.utilities import SerpAPIWrapper\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6df0253f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up tools\n",
|
||||
"\n",
|
||||
"We will create one legitimate tool (search) and then 99 fake tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "becda2a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define which tools the agent can use to answer user queries\n",
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"search_tool = Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def fake_func(inp: str) -> str:\n",
|
||||
" return \"foo\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"fake_tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=f\"foo-{i}\",\n",
|
||||
" func=fake_func,\n",
|
||||
" description=f\"a silly function that you can use to get more information about the number {i}\",\n",
|
||||
" )\n",
|
||||
" for i in range(99)\n",
|
||||
"]\n",
|
||||
"ALL_TOOLS = [search_tool] + fake_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17362717",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tool Retriever\n",
|
||||
"\n",
|
||||
"We will use a vector store to create embeddings for each tool description. Then, for an incoming query we can create embeddings for that query and do a similarity search for relevant tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "77c4be4b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9092a158",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = [\n",
|
||||
" Document(page_content=t.description, metadata={\"index\": i})\n",
|
||||
" for i, t in enumerate(ALL_TOOLS)\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "affc4e56",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "735a7566",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_tools(query):\n",
|
||||
" docs = retriever.invoke(query)\n",
|
||||
" return [ALL_TOOLS[d.metadata[\"index\"]] for d in docs]"
|
||||
]
|
||||
},
|
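If near-duplicate fake tools crowd out the legitimate one, diversity-aware retrieval can help. A sketch, assuming the vector store supports max-marginal-relevance search:

```python
# Trade off similarity against diversity among retrieved tool descriptions
retriever = vector_store.as_retriever(search_type="mmr")
```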
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7699afd7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now test this retriever to see if it seems to work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "425f2886",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Tool(name='Search', description='useful for when you need to answer questions about current events', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<bound method SerpAPIWrapper.run of SerpAPIWrapper(search_engine=<class 'serpapi.google_search.GoogleSearch'>, params={'engine': 'google', 'google_domain': 'google.com', 'gl': 'us', 'hl': 'en'}, serpapi_api_key='', aiosession=None)>, coroutine=None),\n",
|
||||
" Tool(name='foo-95', description='a silly function that you can use to get more information about the number 95', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<function fake_func at 0x15e5bd1f0>, coroutine=None),\n",
|
||||
" Tool(name='foo-12', description='a silly function that you can use to get more information about the number 12', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<function fake_func at 0x15e5bd1f0>, coroutine=None),\n",
|
||||
" Tool(name='foo-15', description='a silly function that you can use to get more information about the number 15', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<function fake_func at 0x15e5bd1f0>, coroutine=None)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"get_tools(\"whats the weather?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "4036dd19",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Tool(name='foo-13', description='a silly function that you can use to get more information about the number 13', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<function fake_func at 0x15e5bd1f0>, coroutine=None),\n",
|
||||
" Tool(name='foo-12', description='a silly function that you can use to get more information about the number 12', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<function fake_func at 0x15e5bd1f0>, coroutine=None),\n",
|
||||
" Tool(name='foo-14', description='a silly function that you can use to get more information about the number 14', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<function fake_func at 0x15e5bd1f0>, coroutine=None),\n",
|
||||
" Tool(name='foo-11', description='a silly function that you can use to get more information about the number 11', return_direct=False, verbose=False, callback_manager=<langchain.callbacks.shared.SharedCallbackManager object at 0x114b28a90>, func=<function fake_func at 0x15e5bd1f0>, coroutine=None)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"get_tools(\"whats the number 13?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e7a075c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prompt template\n",
|
||||
"\n",
|
||||
"The prompt template is pretty standard, because we're not actually changing that much logic in the actual prompt template, but rather we are just changing how retrieval is done."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "339b1bb8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set up the base template\n",
|
||||
"template = \"\"\"Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:\n",
|
||||
"\n",
|
||||
"{tools}\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: the input question you must answer\n",
|
||||
"Thought: you should always think about what to do\n",
|
||||
"Action: the action to take, should be one of [{tool_names}]\n",
|
||||
"Action Input: the input to the action\n",
|
||||
"Observation: the result of the action\n",
|
||||
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
|
||||
"Thought: I now know the final answer\n",
|
||||
"Final Answer: the final answer to the original input question\n",
|
||||
"\n",
|
||||
"Begin! Remember to speak as a pirate when giving your final answer. Use lots of \"Arg\"s\n",
|
||||
"\n",
|
||||
"Question: {input}\n",
|
||||
"{agent_scratchpad}\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1583acdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The custom prompt template now has the concept of a `tools_getter`, which we call on the input to select the tools to use."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 52,
|
||||
"id": "fd969d31",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Callable\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Set up a prompt template\n",
|
||||
"class CustomPromptTemplate(StringPromptTemplate):\n",
|
||||
" # The template to use\n",
|
||||
" template: str\n",
|
||||
" ############## NEW ######################\n",
|
||||
" # The list of tools available\n",
|
||||
" tools_getter: Callable\n",
|
||||
"\n",
|
||||
" def format(self, **kwargs) -> str:\n",
|
||||
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
|
||||
" # Format them in a particular way\n",
|
||||
" intermediate_steps = kwargs.pop(\"intermediate_steps\")\n",
|
||||
" thoughts = \"\"\n",
|
||||
" for action, observation in intermediate_steps:\n",
|
||||
" thoughts += action.log\n",
|
||||
" thoughts += f\"\\nObservation: {observation}\\nThought: \"\n",
|
||||
" # Set the agent_scratchpad variable to that value\n",
|
||||
" kwargs[\"agent_scratchpad\"] = thoughts\n",
|
||||
" ############## NEW ######################\n",
|
||||
" tools = self.tools_getter(kwargs[\"input\"])\n",
|
||||
" # Create a tools variable from the list of tools provided\n",
|
||||
" kwargs[\"tools\"] = \"\\n\".join(\n",
|
||||
" [f\"{tool.name}: {tool.description}\" for tool in tools]\n",
|
||||
" )\n",
|
||||
" # Create a list of tool names for the tools provided\n",
|
||||
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in tools])\n",
|
||||
" return self.template.format(**kwargs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"id": "798ef9fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = CustomPromptTemplate(\n",
|
||||
" template=template,\n",
|
||||
" tools_getter=get_tools,\n",
|
||||
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
|
||||
" # This includes the `intermediate_steps` variable because that is needed\n",
|
||||
" input_variables=[\"input\", \"intermediate_steps\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ef3a1af3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Output parser\n",
|
||||
"\n",
|
||||
"The output parser is unchanged from the previous notebook, since we are not changing anything about the output format."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 54,
|
||||
"id": "7c6fe0d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CustomOutputParser(AgentOutputParser):\n",
|
||||
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
|
||||
" # Check if agent should finish\n",
|
||||
" if \"Final Answer:\" in llm_output:\n",
|
||||
" return AgentFinish(\n",
|
||||
" # Return values is generally always a dictionary with a single `output` key\n",
|
||||
" # It is not recommended to try anything else at the moment :)\n",
|
||||
" return_values={\"output\": llm_output.split(\"Final Answer:\")[-1].strip()},\n",
|
||||
" log=llm_output,\n",
|
||||
" )\n",
|
||||
" # Parse out the action and action input\n",
|
||||
" regex = r\"Action\\s*\\d*\\s*:(.*?)\\nAction\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)\"\n",
|
||||
" match = re.search(regex, llm_output, re.DOTALL)\n",
|
||||
" if not match:\n",
|
||||
" raise ValueError(f\"Could not parse LLM output: `{llm_output}`\")\n",
|
||||
" action = match.group(1).strip()\n",
|
||||
" action_input = match.group(2)\n",
|
||||
" # Return the action and action input\n",
|
||||
" return AgentAction(\n",
|
||||
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"id": "d278706a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output_parser = CustomOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "170587b1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up LLM, stop sequence, and the agent\n",
|
||||
"\n",
|
||||
"Also the same as the previous notebook."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"id": "f9d4c374",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "9b1cc2a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# LLM chain consisting of the LLM and a prompt\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "e4f5092f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = get_tools(\"whats the weather?\")\n",
|
||||
"tool_names = [tool.name for tool in tools]\n",
|
||||
"agent = LLMSingleActionAgent(\n",
|
||||
" llm_chain=llm_chain,\n",
|
||||
" output_parser=output_parser,\n",
|
||||
" stop=[\"\\nObservation:\"],\n",
|
||||
" allowed_tools=tool_names,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aa8a5326",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use the Agent\n",
|
||||
"\n",
|
||||
"Now we can use it!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "490604e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
|
||||
" agent=agent, tools=tools, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"id": "653b1617",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mThought: I need to find out what the weather is in SF\n",
|
||||
"Action: Search\n",
|
||||
"Action Input: Weather in SF\u001b[0m\n",
|
||||
"\n",
|
||||
"Observation:\u001b[36;1m\u001b[1;3mMostly cloudy skies early, then partly cloudy in the afternoon. High near 60F. ENE winds shifting to W at 10 to 15 mph. Humidity71%. UV Index6 of 10.\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: 'Arg, 'tis mostly cloudy skies early, then partly cloudy in the afternoon. High near 60F. ENE winds shiftin' to W at 10 to 15 mph. Humidity71%. UV Index6 of 10.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"'Arg, 'tis mostly cloudy skies early, then partly cloudy in the afternoon. High near 60F. ENE winds shiftin' to W at 10 to 15 mph. Humidity71%. UV Index6 of 10.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 60,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.run(\"What's the weather in SF?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2481ee76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "18784188d7ecd866c0586ac068b02361a6896dc3a29b64f5cc957f09c590acef"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,220 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ba5f8741",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Custom multi-action agent\n",
|
||||
"\n",
|
||||
"This notebook goes through how to create your own custom agent.\n",
|
||||
"\n",
|
||||
"An agent consists of two parts:\n",
|
||||
"\n",
|
||||
"- Tools: The tools the agent has available to use.\n",
|
||||
"- The agent class itself: this decides which action to take.\n",
|
||||
" \n",
|
||||
" \n",
|
||||
"In this notebook we walk through how to create a custom agent that predicts/takes multiple steps at a time."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "9af9734e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentExecutor, BaseMultiActionAgent, Tool\n",
|
||||
"from langchain_community.utilities import SerpAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d7c4ebdc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def random_word(query: str) -> str:\n",
|
||||
" print(\"\\nNow I'm doing this!\")\n",
|
||||
" return \"foo\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "becda2a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"search = SerpAPIWrapper()\n",
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name=\"Search\",\n",
|
||||
" func=search.run,\n",
|
||||
" description=\"useful for when you need to answer questions about current events\",\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name=\"RandomWord\",\n",
|
||||
" func=random_word,\n",
|
||||
" description=\"call this to get a random word.\",\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "a33e2f7e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, List, Tuple, Union\n",
|
||||
"\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class FakeAgent(BaseMultiActionAgent):\n",
|
||||
" \"\"\"Fake Custom Agent.\"\"\"\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def input_keys(self):\n",
|
||||
" return [\"input\"]\n",
|
||||
"\n",
|
||||
" def plan(\n",
|
||||
" self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
|
||||
" ) -> Union[List[AgentAction], AgentFinish]:\n",
|
||||
" \"\"\"Given input, decided what to do.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" intermediate_steps: Steps the LLM has taken to date,\n",
|
||||
" along with observations\n",
|
||||
" **kwargs: User inputs.\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" Action specifying what tool to use.\n",
|
||||
" \"\"\"\n",
|
||||
" if len(intermediate_steps) == 0:\n",
|
||||
" return [\n",
|
||||
" AgentAction(tool=\"Search\", tool_input=kwargs[\"input\"], log=\"\"),\n",
|
||||
" AgentAction(tool=\"RandomWord\", tool_input=kwargs[\"input\"], log=\"\"),\n",
|
||||
" ]\n",
|
||||
" else:\n",
|
||||
" return AgentFinish(return_values={\"output\": \"bar\"}, log=\"\")\n",
|
||||
"\n",
|
||||
" async def aplan(\n",
|
||||
" self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
|
||||
" ) -> Union[List[AgentAction], AgentFinish]:\n",
|
||||
" \"\"\"Given input, decided what to do.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" intermediate_steps: Steps the LLM has taken to date,\n",
|
||||
" along with observations\n",
|
||||
" **kwargs: User inputs.\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" Action specifying what tool to use.\n",
|
||||
" \"\"\"\n",
|
||||
" if len(intermediate_steps) == 0:\n",
|
||||
" return [\n",
|
||||
" AgentAction(tool=\"Search\", tool_input=kwargs[\"input\"], log=\"\"),\n",
|
||||
" AgentAction(tool=\"RandomWord\", tool_input=kwargs[\"input\"], log=\"\"),\n",
|
||||
" ]\n",
|
||||
" else:\n",
|
||||
" return AgentFinish(return_values={\"output\": \"bar\"}, log=\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "655d72f6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = FakeAgent()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "490604e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
|
||||
" agent=agent, tools=tools, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "653b1617",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\u001b[0m\u001b[36;1m\u001b[1;3mThe current population of Canada is 38,669,152 as of Monday, April 24, 2023, based on Worldometer elaboration of the latest United Nations data.\u001b[0m\u001b[32;1m\u001b[1;3m\u001b[0m\n",
|
||||
"Now I'm doing this!\n",
|
||||
"\u001b[33;1m\u001b[1;3mfoo\u001b[0m\u001b[32;1m\u001b[1;3m\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'bar'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent_executor.run(\"How many people live in canada as of 2023?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "adefb4c2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "18784188d7ecd866c0586ac068b02361a6896dc3a29b64f5cc957f09c590acef"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,255 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# QA using Activeloop's DeepLake\n",
|
||||
"In this tutorial, we are going to use Langchain + Activeloop's Deep Lake with GPT4 to semantically search and ask questions over a group chat.\n",
|
||||
"\n",
|
||||
"View a working demo [here](https://twitter.com/thisissukh_/status/1647223328363679745)"
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Install required packages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!python3 -m pip install --upgrade langchain 'deeplake[enterprise]' openai tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Add API keys"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain_community.vectorstores import DeepLake\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import (\n",
|
||||
" CharacterTextSplitter,\n",
|
||||
" RecursiveCharacterTextSplitter,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
|
||||
"os.environ[\"ACTIVELOOP_TOKEN\"] = activeloop_token\n",
|
||||
"os.environ[\"ACTIVELOOP_ORG\"] = getpass.getpass(\"Activeloop Org:\")\n",
|
||||
"\n",
|
||||
"org_id = os.environ[\"ACTIVELOOP_ORG\"]\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"\n",
|
||||
"dataset_path = \"hub://\" + org_id + \"/data\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"## 2. Create sample data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can generate a sample group chat conversation using ChatGPT with this prompt:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"Generate a group chat conversation with three friends talking about their day, referencing real places and fictional names. Make it funny and as detailed as possible.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"I've already generated such a chat in `messages.txt`. We can keep it simple and use this for our example.\n",
|
||||
"\n",
|
||||
"## 3. Ingest chat embeddings\n",
|
||||
"\n",
|
||||
"We load the messages in the text file, chunk and upload to ActiveLoop Vector store."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='Participants:\\n\\nJerry: Loves movies and is a bit of a klutz.\\nSamantha: Enthusiastic about food and always trying new restaurants.\\nBarry: A nature lover, but always manages to get lost.\\nJerry: Hey, guys! You won\\'t believe what happened to me at the Times Square AMC theater. I tripped over my own feet and spilled popcorn everywhere! 🍿💥\\n\\nSamantha: LOL, that\\'s so you, Jerry! Was the floor buttery enough for you to ice skate on after that? 😂\\n\\nBarry: Sounds like a regular Tuesday for you, Jerry. Meanwhile, I tried to find that new hiking trail in Central Park. You know, the one that\\'s supposed to be impossible to get lost on? Well, guess what...\\n\\nJerry: You found a hidden treasure?\\n\\nBarry: No, I got lost. AGAIN. 🧭🙄\\n\\nSamantha: Barry, you\\'d get lost in your own backyard! But speaking of treasures, I found this new sushi place in Little Tokyo. \"Samantha\\'s Sushi Symphony\" it\\'s called. Coincidence? I think not!\\n\\nJerry: Maybe they named it after your ability to eat your body weight in sushi. 🍣', metadata={}), Document(page_content='Barry: How do you even FIND all these places, Samantha?\\n\\nSamantha: Simple, I don\\'t rely on Barry\\'s navigation skills. 😉 But seriously, the wasabi there was hotter than Jerry\\'s love for Marvel movies!\\n\\nJerry: Hey, nothing wrong with a little superhero action. By the way, did you guys see the new \"Captain Crunch: Breakfast Avenger\" trailer?\\n\\nSamantha: Captain Crunch? Are you sure you didn\\'t get that from one of your Saturday morning cereal binges?\\n\\nBarry: Yeah, and did he defeat his arch-enemy, General Mills? 😆\\n\\nJerry: Ha-ha, very funny. Anyway, that sushi place sounds awesome, Samantha. Next time, let\\'s go together, and maybe Barry can guide us... if we want a city-wide tour first.\\n\\nBarry: As long as we\\'re not hiking, I\\'ll get us there... eventually. 😅\\n\\nSamantha: It\\'s a date! But Jerry, you\\'re banned from carrying any food items.\\n\\nJerry: Deal! Just promise me no wasabi challenges. I don\\'t want to end up like the time I tried Sriracha ice cream.', metadata={}), Document(page_content=\"Barry: Wait, what happened with Sriracha ice cream?\\n\\nJerry: Let's just say it was a hot situation. Literally. 🔥\\n\\nSamantha: 🤣 I still have the video!\\n\\nJerry: Samantha, if you value our friendship, that video will never see the light of day.\\n\\nSamantha: No promises, Jerry. No promises. 🤐😈\\n\\nBarry: I foresee a fun weekend ahead! 🎉\", metadata={})]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Your Deep Lake dataset has been successfully created!\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\\"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Dataset(path='hub://adilkhan/data', tensors=['embedding', 'id', 'metadata', 'text'])\n",
|
||||
"\n",
|
||||
" tensor htype shape dtype compression\n",
|
||||
" ------- ------- ------- ------- ------- \n",
|
||||
" embedding embedding (3, 1536) float32 None \n",
|
||||
" id text (3, 1) str None \n",
|
||||
" metadata json (3, 1) str None \n",
|
||||
" text text (3, 1) str None \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" \r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with open(\"messages.txt\") as f:\n",
|
||||
" state_of_the_union = f.read()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"pages = text_splitter.split_text(state_of_the_union)\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.create_documents(pages)\n",
|
||||
"\n",
|
||||
"print(texts)\n",
|
||||
"\n",
|
||||
"dataset_path = \"hub://\" + org_id + \"/data\"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"db = DeepLake.from_documents(\n",
|
||||
" texts, embeddings, dataset_path=dataset_path, overwrite=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`Optional`: You can also use Deep Lake's Managed Tensor Database as a hosting service and run queries there. In order to do so, it is necessary to specify the runtime parameter as {'tensor_db': True} during the creation of the vector store. This configuration enables the execution of queries on the Managed Tensor Database, rather than on the client side. It should be noted that this functionality is not applicable to datasets stored locally or in-memory. In the event that a vector store has already been created outside of the Managed Tensor Database, it is possible to transfer it to the Managed Tensor Database by following the prescribed steps."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# with open(\"messages.txt\") as f:\n",
|
||||
"# state_of_the_union = f.read()\n",
|
||||
"# text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"# pages = text_splitter.split_text(state_of_the_union)\n",
|
||||
"\n",
|
||||
"# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)\n",
|
||||
"# texts = text_splitter.create_documents(pages)\n",
|
||||
"\n",
|
||||
"# print(texts)\n",
|
||||
"\n",
|
||||
"# dataset_path = \"hub://\" + org + \"/data\"\n",
|
||||
"# embeddings = OpenAIEmbeddings()\n",
|
||||
"# db = DeepLake.from_documents(\n",
|
||||
"# texts, embeddings, dataset_path=dataset_path, overwrite=True, runtime={\"tensor_db\": True}\n",
|
||||
"# )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4. Ask questions\n",
|
||||
"\n",
|
||||
"Now we can ask a question and get an answer back with a semantic search:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"db = DeepLake(dataset_path=dataset_path, read_only=True, embedding=embeddings)\n",
|
||||
"\n",
|
||||
"retriever = db.as_retriever()\n",
|
||||
"retriever.search_kwargs[\"distance_metric\"] = \"cos\"\n",
|
||||
"retriever.search_kwargs[\"k\"] = 4\n",
|
||||
"\n",
|
||||
"qa = RetrievalQA.from_chain_type(\n",
|
||||
" llm=OpenAI(), chain_type=\"stuff\", retriever=retriever, return_source_documents=False\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# What was the restaurant the group was talking about called?\n",
|
||||
"query = input(\"Enter query:\")\n",
|
||||
"\n",
|
||||
"# The Hungry Lobster\n",
|
||||
"ans = qa({\"query\": query})\n",
|
||||
"\n",
|
||||
"print(ans)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,156 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Elasticsearch\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/qa_structured/integrations/elasticsearch.ipynb)\n",
|
||||
"\n",
|
||||
"We can use LLMs to interact with Elasticsearch analytics databases in natural language.\n",
|
||||
"\n",
|
||||
"This chain builds search queries via the Elasticsearch DSL API (filters and aggregations).\n",
|
||||
"\n",
|
||||
"The Elasticsearch client must have permissions for index listing, mapping description and search queries.\n",
|
||||
"\n",
|
||||
"See [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html) for instructions on how to run Elasticsearch locally."
|
||||
]
|
||||
},
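{
"cell_type": "markdown",
"metadata": {},
"source": [
"For intuition, here is a hedged sketch (not output from the chain itself) of the kind of Elasticsearch DSL body the LLM might generate; the real query depends on the index mapping and the question."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical example of a generated DSL query for\n",
"# \"What are the first names of all the customers?\"\n",
"example_query = {\n",
"    \"query\": {\"match_all\": {}},\n",
"    \"_source\": [\"firstname\"],\n",
"    \"size\": 10,\n",
"}\n",
"print(example_query)"
]
},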
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install langchain langchain-experimental openai elasticsearch\n",
|
||||
"\n",
|
||||
"# Set env var OPENAI_API_KEY or load from a .env file\n",
|
||||
"# import dotenv\n",
|
||||
"\n",
|
||||
"# dotenv.load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from elasticsearch import Elasticsearch\n",
|
||||
"from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize Elasticsearch python client.\n",
|
||||
"# See https://elasticsearch-py.readthedocs.io/en/v8.8.2/api.html#elasticsearch.Elasticsearch\n",
|
||||
"ELASTIC_SEARCH_SERVER = \"https://elastic:pass@localhost:9200\"\n",
|
||||
"db = Elasticsearch(ELASTIC_SEARCH_SERVER)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Uncomment the next cell to initially populate your db."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# customers = [\n",
|
||||
"# {\"firstname\": \"Jennifer\", \"lastname\": \"Walters\"},\n",
|
||||
"# {\"firstname\": \"Monica\",\"lastname\":\"Rambeau\"},\n",
|
||||
"# {\"firstname\": \"Carol\",\"lastname\":\"Danvers\"},\n",
|
||||
"# {\"firstname\": \"Wanda\",\"lastname\":\"Maximoff\"},\n",
|
||||
"# {\"firstname\": \"Jennifer\",\"lastname\":\"Takeda\"},\n",
|
||||
"# ]\n",
|
||||
"# for i, customer in enumerate(customers):\n",
|
||||
"# db.create(index=\"customers\", document=customer, id=i)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
|
||||
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What are the first names of all the customers?\"\n",
|
||||
"chain.run(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can customize the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"\n",
|
||||
"PROMPT_TEMPLATE = \"\"\"Given an input question, create a syntactically correct Elasticsearch query to run. Unless the user specifies in their question a specific number of examples they wish to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.\n",
|
||||
"\n",
|
||||
"Unless told to do not query for all the columns from a specific index, only ask for a few relevant columns given the question.\n",
|
||||
"\n",
|
||||
"Pay attention to use only the column names that you can see in the mapping description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which index. Return the query as valid json.\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: Question here\n",
|
||||
"ESQuery: Elasticsearch Query formatted as json\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"PROMPT = PromptTemplate.from_template(\n",
|
||||
" PROMPT_TEMPLATE,\n",
|
||||
")\n",
|
||||
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, query_prompt=PROMPT)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,214 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2def22ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Extraction with OpenAI Tools\n",
|
||||
"\n",
|
||||
"Performing extraction has never been easier! OpenAI's tool calling ability is the perfect thing to use as it allows for extracting multiple different elements from text that are different types. \n",
|
||||
"\n",
|
||||
"Models after 1106 use tools and support \"parallel function calling\" which makes this super easy."
]
|
||||
},
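{
"cell_type": "markdown",
"id": "0e1f2a3b",
"metadata": {},
"source": [
"To see parallel tool calling at the message level, here is a minimal sketch (not part of the original flow; it assumes an OpenAI API key is set and uses a hypothetical `City` schema): with two cities in the input, the model should emit two tool calls in a single response."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1a2b3c4d",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"class City(BaseModel):\n",
"    \"\"\"Information about a city to extract.\"\"\"\n",
"\n",
"    name: str\n",
"\n",
"\n",
"sketch_model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")\n",
"msg = sketch_model.bind_tools([City]).invoke(\"I visited Paris and Rome\")\n",
"# Raw OpenAI-format tool calls; expect one entry per city\n",
"print(msg.additional_kwargs.get(\"tool_calls\"))"
]
},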
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "5c628496",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List, Optional\n",
|
||||
"\n",
|
||||
"from langchain.chains.openai_tools import create_extraction_chain_pydantic\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "afe9657b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Make sure to use a recent model that supports tools\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "bc0ca3b6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Pydantic is an easy way to define a schema\n",
|
||||
"class Person(BaseModel):\n",
|
||||
" \"\"\"Information about people to extract.\"\"\"\n",
|
||||
"\n",
|
||||
" name: str\n",
|
||||
" age: Optional[int] = None"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "2036af68",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain_pydantic(Person, model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "1748ad21",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Person(name='jane', age=2), Person(name='bob', age=3)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"input\": \"jane is 2 and bob is 3\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "c8262ce5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Let's define another element\n",
|
||||
"class Class(BaseModel):\n",
|
||||
" \"\"\"Information about classes to extract.\"\"\"\n",
|
||||
"\n",
|
||||
" teacher: str\n",
|
||||
" students: List[str]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "4973c104",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = create_extraction_chain_pydantic([Person, Class], model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "e976a15e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Person(name='jane', age=2),\n",
|
||||
" Person(name='bob', age=3),\n",
|
||||
" Class(teacher='Mrs Sampson', students=['jane', 'bob'])]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"input\": \"jane is 2 and bob is 3 and they are in Mrs Sampson's class\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6575a7d6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Under the hood\n",
|
||||
"\n",
|
||||
"Under the hood, this is a simple chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b8ba83e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```python\n",
|
||||
"from typing import Union, List, Type, Optional\n",
|
||||
"\n",
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
|
||||
"from langchain_core.runnables import Runnable\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.messages import SystemMessage\n",
|
||||
"from langchain_core.language_models import BaseLanguageModel\n",
|
||||
"\n",
|
||||
"_EXTRACTION_TEMPLATE = \"\"\"Extract and save the relevant entities mentioned \\\n",
|
||||
"in the following passage together with their properties.\n",
|
||||
"\n",
|
||||
"If a property is not present and is not required in the function parameters, do not include it in the output.\"\"\" # noqa: E501\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def create_extraction_chain_pydantic(\n",
|
||||
" pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],\n",
|
||||
" llm: BaseLanguageModel,\n",
|
||||
" system_message: str = _EXTRACTION_TEMPLATE,\n",
|
||||
") -> Runnable:\n",
|
||||
" if not isinstance(pydantic_schemas, list):\n",
|
||||
" pydantic_schemas = [pydantic_schemas]\n",
|
||||
" prompt = ChatPromptTemplate.from_messages([\n",
|
||||
" (\"system\", system_message),\n",
|
||||
" (\"user\", \"{input}\")\n",
|
||||
" ])\n",
|
||||
" tools = [convert_pydantic_to_openai_tool(p) for p in pydantic_schemas]\n",
|
||||
" model = llm.bind(tools=tools)\n",
|
||||
" chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)\n",
|
||||
" return chain\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2eac6b68",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "052dfe58",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Fake LLM\n",
|
||||
"LangChain provides a fake LLM class that can be used for testing. This allows you to mock out calls to the LLM and simulate what would happen if the LLM responded in a certain way.\n",
|
||||
"\n",
|
||||
"In this notebook we go over how to use this.\n",
|
||||
"\n",
|
||||
"We start this with using the FakeLLM in an agent."
|
||||
]
|
||||
},
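{
"cell_type": "markdown",
"id": "b1c2d3e4",
"metadata": {},
"source": [
"Before plugging it into an agent, here is a minimal sketch (not part of the original notebook) showing that `FakeListLLM` simply returns its canned responses in order, one per call."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c2d3e4f5",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms.fake import FakeListLLM\n",
"\n",
"fake_llm = FakeListLLM(responses=[\"first canned answer\", \"second canned answer\"])\n",
"print(fake_llm.invoke(\"any prompt\"))  # -> first canned answer\n",
"print(fake_llm.invoke(\"any prompt\"))  # -> second canned answer"
]
},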
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ef97ac4d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms.fake import FakeListLLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "9a0a160f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "b272258c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = load_tools([\"python_repl\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "94096c4c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"responses = [\"Action: Python REPL\\nAction Input: print(2 + 2)\", \"Final Answer: 4\"]\n",
|
||||
"llm = FakeListLLM(responses=responses)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "da226d02",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "44c13426",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction: Python REPL\n",
|
||||
"Action Input: print(2 + 2)\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m4\n",
|
||||
"\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3mFinal Answer: 4\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'4'"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.invoke(\"whats 2 + 2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "814c2858",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,245 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0fc0309d-4d49-4bb5-bec0-bd92c6fddb28",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Fireworks.AI + LangChain + RAG\n",
|
||||
" \n",
|
||||
"[Fireworks AI](https://python.langchain.com/docs/integrations/llms/fireworks) wants to provide the best experience when working with LangChain, and here is an example of Fireworks + LangChain doing RAG\n",
|
||||
"\n",
|
||||
"See [our models page](https://fireworks.ai/models) for the full list of models. We use `accounts/fireworks/models/mixtral-8x7b-instruct` for RAG In this tutorial.\n",
|
||||
"\n",
|
||||
"For the RAG target, we will use the Gemma technical report https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf "
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "d12fb75a-f707-48d5-82a5-efe2d041813c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n",
|
||||
"Found existing installation: langchain-fireworks 0.0.1\n",
|
||||
"Uninstalling langchain-fireworks-0.0.1:\n",
|
||||
" Successfully uninstalled langchain-fireworks-0.0.1\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n",
|
||||
"Obtaining file:///mnt/disks/data/langchain/libs/partners/fireworks\n",
|
||||
" Installing build dependencies ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Checking if build backend supports build_editable ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Getting requirements to build editable ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Preparing editable metadata (pyproject.toml) ... \u001b[?25ldone\n",
|
||||
"\u001b[?25hRequirement already satisfied: aiohttp<4.0.0,>=3.9.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (3.9.3)\n",
|
||||
"Requirement already satisfied: fireworks-ai<0.13.0,>=0.12.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (0.12.0)\n",
|
||||
"Requirement already satisfied: langchain-core<0.2,>=0.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (0.1.23)\n",
|
||||
"Requirement already satisfied: requests<3,>=2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (2.31.0)\n",
|
||||
"Requirement already satisfied: aiosignal>=1.1.2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (1.3.1)\n",
|
||||
"Requirement already satisfied: attrs>=17.3.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (23.1.0)\n",
|
||||
"Requirement already satisfied: frozenlist>=1.1.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (1.4.0)\n",
|
||||
"Requirement already satisfied: multidict<7.0,>=4.5 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (6.0.4)\n",
|
||||
"Requirement already satisfied: yarl<2.0,>=1.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (1.9.2)\n",
|
||||
"Requirement already satisfied: async-timeout<5.0,>=4.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (4.0.3)\n",
|
||||
"Requirement already satisfied: httpx in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.26.0)\n",
|
||||
"Requirement already satisfied: httpx-sse in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.4.0)\n",
|
||||
"Requirement already satisfied: pydantic in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (2.4.2)\n",
|
||||
"Requirement already satisfied: Pillow in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (10.2.0)\n",
|
||||
"Requirement already satisfied: PyYAML>=5.3 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (6.0.1)\n",
|
||||
"Requirement already satisfied: anyio<5,>=3 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (3.7.1)\n",
|
||||
"Requirement already satisfied: jsonpatch<2.0,>=1.33 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (1.33)\n",
|
||||
"Requirement already satisfied: langsmith<0.2.0,>=0.1.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (0.1.5)\n",
|
||||
"Requirement already satisfied: packaging<24.0,>=23.2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (23.2)\n",
|
||||
"Requirement already satisfied: tenacity<9.0.0,>=8.1.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (8.2.3)\n",
|
||||
"Requirement already satisfied: charset-normalizer<4,>=2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (3.3.0)\n",
|
||||
"Requirement already satisfied: idna<4,>=2.5 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (3.4)\n",
|
||||
"Requirement already satisfied: urllib3<3,>=1.21.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (2.0.6)\n",
|
||||
"Requirement already satisfied: certifi>=2017.4.17 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (2023.7.22)\n",
|
||||
"Requirement already satisfied: sniffio>=1.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from anyio<5,>=3->langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (1.3.0)\n",
|
||||
"Requirement already satisfied: exceptiongroup in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from anyio<5,>=3->langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (1.1.3)\n",
|
||||
"Requirement already satisfied: jsonpointer>=1.9 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (2.4)\n",
|
||||
"Requirement already satisfied: annotated-types>=0.4.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from pydantic->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.5.0)\n",
|
||||
"Requirement already satisfied: pydantic-core==2.10.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from pydantic->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (2.10.1)\n",
|
||||
"Requirement already satisfied: typing-extensions>=4.6.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from pydantic->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (4.8.0)\n",
|
||||
"Requirement already satisfied: httpcore==1.* in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from httpx->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (1.0.2)\n",
|
||||
"Requirement already satisfied: h11<0.15,>=0.13 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from httpcore==1.*->httpx->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.14.0)\n",
|
||||
"Building wheels for collected packages: langchain-fireworks\n",
|
||||
" Building editable for langchain-fireworks (pyproject.toml) ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Created wheel for langchain-fireworks: filename=langchain_fireworks-0.0.1-py3-none-any.whl size=2228 sha256=564071b120b09ec31f2dc737733448a33bbb26e40b49fcde0c129ad26045259d\n",
|
||||
" Stored in directory: /tmp/pip-ephem-wheel-cache-oz368vdk/wheels/e0/ad/31/d7e76dd73d61905ff7f369f5b0d21a4b5e7af4d3cb7487aece\n",
|
||||
"Successfully built langchain-fireworks\n",
|
||||
"Installing collected packages: langchain-fireworks\n",
|
||||
"Successfully installed langchain-fireworks-0.0.1\n",
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --quiet pypdf langchain-chroma tiktoken openai \n",
|
||||
"%pip uninstall -y langchain-fireworks\n",
|
||||
"%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cf719376",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<module 'fireworks' from '/mnt/disks/data/langchain/.venv/lib/python3.9/site-packages/fireworks/__init__.py'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import fireworks\n",
|
||||
"\n",
|
||||
"print(fireworks)\n",
|
||||
"import fireworks.client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9ab49327-0532-4480-804c-d066c302a322",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load\n",
|
||||
"import requests\n",
|
||||
"from langchain_community.document_loaders import PyPDFLoader\n",
|
||||
"\n",
|
||||
"# Download the PDF from a URL and save it to a temporary location\n",
|
||||
"url = \"https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf\"\n",
|
||||
"response = requests.get(url, stream=True)\n",
|
||||
"file_name = \"temp_file.pdf\"\n",
|
||||
"with open(file_name, \"wb\") as pdf:\n",
|
||||
" pdf.write(response.content)\n",
|
||||
"\n",
|
||||
"loader = PyPDFLoader(file_name)\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"# Split\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Add to vectorDB\n",
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_fireworks.embeddings import FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(\n",
|
||||
" documents=all_splits,\n",
|
||||
" collection_name=\"rag-chroma\",\n",
|
||||
" embedding=FireworksEmbeddings(),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "4efaddd9-3dbb-455c-ba54-0ad7f2d2ce0f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
|
||||
"\n",
|
||||
"# RAG prompt\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"# LLM\n",
|
||||
"from langchain_together import Together\n",
|
||||
"\n",
|
||||
"llm = Together(\n",
|
||||
" model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n",
|
||||
" temperature=0.0,\n",
|
||||
" max_tokens=2000,\n",
|
||||
" top_k=1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# RAG chain\n",
|
||||
"chain = (\n",
|
||||
" RunnableParallel({\"context\": retriever, \"question\": RunnablePassthrough()})\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "88b1ee51-1b0f-4ebf-bb32-e50e843f0eeb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nAnswer: The architectural details of Mixtral are as follows:\\n- Dimension (dim): 4096\\n- Number of layers (n\\\\_layers): 32\\n- Dimension of each head (head\\\\_dim): 128\\n- Hidden dimension (hidden\\\\_dim): 14336\\n- Number of heads (n\\\\_heads): 32\\n- Number of kv heads (n\\\\_kv\\\\_heads): 8\\n- Context length (context\\\\_len): 32768\\n- Vocabulary size (vocab\\\\_size): 32000\\n- Number of experts (num\\\\_experts): 8\\n- Number of top k experts (top\\\\_k\\\\_experts): 2\\n\\nMixtral is based on a transformer architecture and uses the same modifications as described in [18], with the notable exceptions that Mixtral supports a fully dense context length of 32k tokens, and the feedforward block picks from a set of 8 distinct groups of parameters. At every layer, for every token, a router network chooses two of these groups (the “experts”) to process the token and combine their output additively. This technique increases the number of parameters of a model while controlling cost and latency, as the model only uses a fraction of the total set of parameters per token. Mixtral is pretrained with multilingual data using a context size of 32k tokens. It either matches or exceeds the performance of Llama 2 70B and GPT-3.5, over several benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"What are the Architectural details of Mixtral?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "755cf871-26b7-4e30-8b91-9ffd698470f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Trace: \n",
|
||||
"\n",
|
||||
"https://smith.langchain.com/public/935fd642-06a6-4b42-98e3-6074f93115cd/r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,493 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f0b9afa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Retrieve as you generate with FLARE\n",
|
||||
"\n",
|
||||
"This notebook is an implementation of Forward-Looking Active REtrieval augmented generation (FLARE).\n",
|
||||
"\n",
|
||||
"Please see the original repo [here](https://github.com/jzbjyb/FLARE/tree/main).\n",
|
||||
"\n",
|
||||
"The basic idea is:\n",
|
||||
"\n",
|
||||
"- Start answering a question\n",
|
||||
"- If you start generating tokens the model is uncertain about, look up relevant documents\n",
|
||||
"- Use those documents to continue generating\n",
|
||||
"- Repeat until finished\n",
|
||||
"\n",
|
||||
"There is a lot of cool detail in how the lookup of relevant documents is done.\n",
|
||||
"Basically, the tokens that model is uncertain about are highlighted, and then an LLM is called to generate a question that would lead to that answer. For example, if the generated text is `Joe Biden went to Harvard`, and the tokens the model was uncertain about was `Harvard`, then a good generated question would be `where did Joe Biden go to college`. This generated question is then used in a retrieval step to fetch relevant documents.\n",
|
||||
"\n",
|
||||
"In order to set up this chain, we will need three things:\n",
|
||||
"\n",
|
||||
"- An LLM to generate the answer\n",
|
||||
"- An LLM to generate hypothetical questions to use in retrieval\n",
|
||||
"- A retriever to use to look up answers for\n",
|
||||
"\n",
|
||||
"The LLM that we use to generate the answer needs to return logprobs so we can identify uncertain tokens. For that reason, we HIGHLY recommend that you use the OpenAI wrapper (NB: not the ChatOpenAI wrapper, as that does not return logprobs).\n",
|
||||
"\n",
|
||||
"The LLM we use to generate hypothetical questions to use in retrieval can be anything. In this notebook we will use ChatOpenAI because it is fast and cheap.\n",
|
||||
"\n",
|
||||
"The retriever can be anything. In this notebook we will use [SERPER](https://serper.dev/) search engine, because it is cheap.\n",
|
||||
"\n",
|
||||
"Other important parameters to understand:\n",
|
||||
"\n",
|
||||
"- `max_generation_len`: The maximum number of tokens to generate before stopping to check if any are uncertain\n",
|
||||
"- `min_prob`: Any tokens generated with probability below this will be considered uncertain"
|
||||
]
|
||||
},
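{
"cell_type": "markdown",
"id": "d4e5f6a7",
"metadata": {},
"source": [
"Before wiring up the chain, here is a minimal sketch (not part of `FlareChain` itself, using hypothetical logprob values) of the uncertainty test described above: recover each token's probability from its logprob and flag it when that probability falls below `min_prob`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e5f6a7b8",
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"\n",
"min_prob = 0.3\n",
"\n",
"# Hypothetical (token, logprob) pairs as a completion endpoint might return them\n",
"tokens_with_logprobs = [\n",
"    (\"Joe\", -0.05),\n",
"    (\"Biden\", -0.02),\n",
"    (\"went\", -0.10),\n",
"    (\"to\", -0.01),\n",
"    (\"Harvard\", -2.30),  # exp(-2.30) ~= 0.10 < 0.3, so this token is uncertain\n",
"]\n",
"\n",
"uncertain = [t for t, lp in tokens_with_logprobs if math.exp(lp) < min_prob]\n",
"print(uncertain)  # ['Harvard']"
]
},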
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a7e4b63d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "042bb161",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"SERPER_API_KEY\"] = \"\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a7888f4a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, List\n",
|
||||
"\n",
|
||||
"from langchain.callbacks.manager import (\n",
|
||||
" AsyncCallbackManagerForRetrieverRun,\n",
|
||||
" CallbackManagerForRetrieverRun,\n",
|
||||
")\n",
|
||||
"from langchain_community.utilities import GoogleSerperAPIWrapper\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.retrievers import BaseRetriever\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5f552dce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "59c7d875",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class SerperSearchRetriever(BaseRetriever):\n",
|
||||
" search: GoogleSerperAPIWrapper = None\n",
|
||||
"\n",
|
||||
" def _get_relevant_documents(\n",
|
||||
" self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any\n",
|
||||
" ) -> List[Document]:\n",
|
||||
" return [Document(page_content=self.search.run(query))]\n",
|
||||
"\n",
|
||||
" async def _aget_relevant_documents(\n",
|
||||
" self,\n",
|
||||
" query: str,\n",
|
||||
" *,\n",
|
||||
" run_manager: AsyncCallbackManagerForRetrieverRun,\n",
|
||||
" **kwargs: Any,\n",
|
||||
" ) -> List[Document]:\n",
|
||||
" raise NotImplementedError()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retriever = SerperSearchRetriever(search=GoogleSerperAPIWrapper())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "92478194",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## FLARE Chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "577e7c2c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# We set this so we can see what exactly is going on\n",
|
||||
"from langchain.globals import set_verbose\n",
|
||||
"\n",
|
||||
"set_verbose(True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "300d783e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import FlareChain\n",
|
||||
"\n",
|
||||
"flare = FlareChain.from_llm(\n",
|
||||
" ChatOpenAI(temperature=0),\n",
|
||||
" retriever=retriever,\n",
|
||||
" max_generation_len=164,\n",
|
||||
" min_prob=0.3,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "1f3d5e90",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"explain in great detail the difference between the langchain framework and baby agi\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "4b1bfa8c",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new FlareChain chain...\u001b[0m\n",
|
||||
"\u001b[36;1m\u001b[1;3mCurrent Response: \u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: \n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> RESPONSE: \u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new QuestionGeneratorChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" decentralized platform for natural language processing\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" uses a blockchain\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" distributed ledger to\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" process data, allowing for secure and transparent data sharing.\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" set of tools\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" help developers create\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" create an AI system\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"The Langchain Framework is a decentralized platform for natural language processing (NLP) applications. It uses a blockchain-based distributed ledger to store and process data, allowing for secure and transparent data sharing. The Langchain Framework also provides a set of tools and services to help developers create and deploy NLP applications.\n",
|
||||
"\n",
|
||||
"Baby AGI, on the other hand, is an artificial general intelligence (AGI) platform. It uses a combination of deep learning and reinforcement learning to create an AI system that can learn and adapt to new tasks. Baby AGI is designed to be a general-purpose AI system that can be used for a variety of applications, including natural language processing.\n",
|
||||
"\n",
|
||||
"In summary, the Langchain Framework is a platform for NLP applications, while Baby AGI is an AI system designed for\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" NLP applications\" is:\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3mGenerated Questions: ['What is the Langchain Framework?', 'What technology does the Langchain Framework use to store and process data for secure and transparent data sharing?', 'What technology does the Langchain Framework use to store and process data?', 'What does the Langchain Framework use a blockchain-based distributed ledger for?', 'What does the Langchain Framework provide in addition to a decentralized platform for natural language processing applications?', 'What set of tools and services does the Langchain Framework provide?', 'What is the purpose of Baby AGI?', 'What type of applications is the Langchain Framework designed for?']\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new _OpenAIResponseChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: LangChain: Software. LangChain is a software development framework designed to simplify the creation of applications using large language models. LangChain Initial release date: October 2022. LangChain Programming languages: Python and JavaScript. LangChain Developer(s): Harrison Chase. LangChain License: MIT License. LangChain is a framework for developing applications powered by language models. We believe that the most powerful and differentiated applications will not only ... Type: Software framework. At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. LangChain is a powerful tool that can be used to work with Large Language Models (LLMs). LLMs are very general in nature, which means that while they can ... LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. LangChain is a software development framework designed to simplify the creation of applications using large language models (LLMs). Written in: Python and JavaScript. Initial release: October 2022. LangChain - The A.I-native developer toolkit We started LangChain with the intent to build a modular and flexible framework for developing A.I- ... LangChain explained in 3 minutes - LangChain is a ... Duration: 3:03. Posted: Apr 13, 2023. LangChain is a framework built to help you build LLM-powered applications more easily by providing you with the following:. LangChain is a framework that enables quick and easy development of applications that make use of Large Language Models, for example, GPT-3. LangChain is a powerful open-source framework for developing applications powered by language models. It connects to the AI models you want to ...\n",
|
||||
"\n",
|
||||
"LangChain is a framework for including AI from large language models inside data pipelines and applications. This tutorial provides an overview of what you ... Missing: secure | Must include:secure. Blockchain is the best way to secure the data of the shared community. Utilizing the capabilities of the blockchain nobody can read or interfere ... This modern technology consists of a chain of blocks that allows to securely store all committed transactions using shared and distributed ... A Blockchain network is used in the healthcare system to preserve and exchange patient data through hospitals, diagnostic laboratories, pharmacy firms, and ... In this article, I will walk you through the process of using the LangChain.js library with Google Cloud Functions, helping you leverage the ... LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. Missing: transparent | Must include:transparent. This technology keeps a distributed ledger on each blockchain node, making it more secure and transparent. The blockchain network can operate smart ... blockchain technology can offer a highly secured health data ledger to ... framework can be employed to store encrypted healthcare data in a ... In a simplified way, Blockchain is a data structure that stores transactions in an ordered way and linked to the previous block, serving as a ... Blockchain technology is a decentralized, distributed ledger that stores the record of ownership of digital assets. Missing: Langchain | Must include:Langchain.\n",
|
||||
"\n",
|
||||
"LangChain is a framework for including AI from large language models inside data pipelines and applications. This tutorial provides an overview of what you ... LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. This documentation covers the steps to integrate Pinecone, a high-performance vector database, with LangChain, a framework for building applications powered ... The ability to connect to any model, ingest any custom database, and build upon a framework that can take action provides numerous use cases for ... With LangChain, developers can use a framework that abstracts the core building blocks of LLM applications. LangChain empowers developers to ... Build a question-answering tool based on financial data with LangChain & Deep Lake's unified & streamable data store. Browse applications built on LangChain technology. Explore PoC and MVP applications created by our community and discover innovative use cases for LangChain ... LangChain is a great framework that can be used for developing applications powered by LLMs. When you intend to enhance your application ... In this blog, we'll introduce you to LangChain and Ray Serve and how to use them to build a search engine using LLM embeddings and a vector ... The LinkChain Framework simplifies embedding creation and storage using Pinecone and Chroma, with code that loads files, splits documents, and creates embedding ... Missing: technology | Must include:technology.\n",
|
||||
"\n",
|
||||
"Blockchain is one type of a distributed ledger. Distributed ledgers use independent computers (referred to as nodes) to record, share and ... Missing: Langchain | Must include:Langchain. Blockchain is used in distributed storage software where huge data is broken down into chunks. This is available in encrypted data across a ... People sometimes use the terms 'Blockchain' and 'Distributed Ledger' interchangeably. This post aims to analyze the features of each. A distributed ledger ... Missing: Framework | Must include:Framework. Think of a “distributed ledger” that uses cryptography to allow each participant in the transaction to add to the ledger in a secure way without ... In this paper, we provide an overview of the history of trade settlement and discuss this nascent technology that may now transform traditional ... Missing: Langchain | Must include:Langchain. LangChain is a blockchain-based language education platform that aims to revolutionize the way people learn languages. Missing: Framework | Must include:Framework. It uses the distributed ledger technology framework and Smart contract engine for building scalable Business Blockchain applications. The fabric ... It looks at the assets the use case is handling, the different parties conducting transactions, and the smart contract, distributed ... Are you curious to know how Blockchain and Distributed ... Duration: 44:31. Posted: May 4, 2021. A blockchain is a distributed and immutable ledger to transfer ownership, record transactions, track assets, and ensure transparency, security, trust and value ... Missing: Langchain | Must include:Langchain.\n",
|
||||
"\n",
|
||||
"LangChain is an intuitive framework created to assist in developing applications driven by a language model, such as OpenAI or Hugging Face. Missing: decentralized | Must include:decentralized. LangChain, created by Harrison Chase, is a Python library that provides out-of-the-box support to build NLP applications using LLMs. Missing: decentralized | Must include:decentralized. LangChain provides a standard interface for chains, enabling developers to create sequences of calls that go beyond a single LLM call. Chains ... Missing: decentralized platform natural. LangChain is a powerful framework that simplifies the process of building advanced language model applications. Missing: platform | Must include:platform. Are your language models ignoring previous instructions ... Duration: 32:23. Posted: Feb 21, 2023. LangChain is a framework that enables quick and easy development of applications ... Prompting is the new way of programming NLP models. Missing: decentralized platform. It then uses natural language processing and machine learning algorithms to search ... Summarization is handled via cohere, QnA is handled via langchain, ... LangChain is a framework for developing applications powered by language models. ... There are several main modules that LangChain provides support for. Missing: decentralized platform. In the healthcare-chain system, blockchain provides an appreciated secure ... The entire process of adding new and previous block data is performed based on ... ChatGPT is a large language model developed by OpenAI, ... tool for a wide range of applications, including natural language processing, ...\n",
|
||||
"\n",
|
||||
"LangChain is a powerful tool that can be used to work with Large Language ... If an API key has been provided, create an OpenAI language model instance At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. A tutorial of the six core modules of the LangChain Python package covering models, prompts, chains, agents, indexes, and memory with OpenAI ... LangChain's collection of tools refers to a set of tools provided by the LangChain framework for developing applications powered by language models. LangChain is a framework for developing applications powered by language models. We believe that the most powerful and differentiated applications will not only ... LangChain is an open-source library that provides developers with the tools to build applications powered by large language models (LLMs). LangChain is a framework for including AI from large language models inside data pipelines and applications. This tutorial provides an overview of what you ... Plan-and-Execute Agents · Feature Stores and LLMs · Structured Tools · Auto-Evaluator Opportunities · Callbacks Improvements · Unleashing the power ... Tool: A function that performs a specific duty. This can be things like: Google Search, Database lookup, Python REPL, other chains. · LLM: The language model ... LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.\n",
|
||||
"\n",
|
||||
"Baby AGI has the ability to complete tasks, generate new tasks based on previous results, and prioritize tasks in real-time. This system is exploring and demonstrating to us the potential of large language models, such as GPT and how it can autonomously perform tasks. Apr 17, 2023\n",
|
||||
"\n",
|
||||
"At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. The core idea of the library is that we can “chain” together different components to create more advanced use cases around LLMs.\n",
|
||||
">>> USER INPUT: explain in great detail the difference between the langchain framework and baby agi\n",
|
||||
">>> RESPONSE: \u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' LangChain is a framework for developing applications powered by language models. It provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications. On the other hand, Baby AGI is an AI system that is exploring and demonstrating the potential of large language models, such as GPT, and how it can autonomously perform tasks. Baby AGI has the ability to complete tasks, generate new tasks based on previous results, and prioritize tasks in real-time. '"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"flare.run(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "7bed8944",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nThe Langchain framework and Baby AGI are both artificial intelligence (AI) frameworks that are used to create intelligent agents. The Langchain framework is a supervised learning system that is based on the concept of “language chains”. It uses a set of rules to map natural language inputs to specific outputs. It is a general-purpose AI framework and can be used to build applications such as natural language processing (NLP), chatbots, and more.\\n\\nBaby AGI, on the other hand, is an unsupervised learning system that uses neural networks and reinforcement learning to learn from its environment. It is used to create intelligent agents that can adapt to changing environments. It is a more advanced AI system and can be used to build more complex applications such as game playing, robotic vision, and more.\\n\\nThe main difference between the two is that the Langchain framework uses supervised learning while Baby AGI uses unsupervised learning. The Langchain framework is a general-purpose AI framework that can be used for various applications, while Baby AGI is a more advanced AI system that can be used to create more complex applications.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = OpenAI()\n",
|
||||
"llm.invoke(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "8fb76286",
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new FlareChain chain...\u001b[0m\n",
|
||||
"\u001b[36;1m\u001b[1;3mCurrent Response: \u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: \n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> RESPONSE: \u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new QuestionGeneratorChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"\n",
|
||||
"Langchain and Bitcoin have very different origin stories. Bitcoin was created by the mysterious Satoshi Nakamoto in 2008 as a decentralized digital currency. Langchain, on the other hand, was created in 2020 by a team of developers as a platform for creating and managing decentralized language learning applications. \n",
|
||||
"\n",
|
||||
"FINISHED\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" very different origin\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"\n",
|
||||
"Langchain and Bitcoin have very different origin stories. Bitcoin was created by the mysterious Satoshi Nakamoto in 2008 as a decentralized digital currency. Langchain, on the other hand, was created in 2020 by a team of developers as a platform for creating and managing decentralized language learning applications. \n",
|
||||
"\n",
|
||||
"FINISHED\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" 2020 by a\" is:\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mGiven a user input and an existing partial response as context, ask a question to which the answer is the given term/entity/phrase:\n",
|
||||
"\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> EXISTING PARTIAL RESPONSE: \n",
|
||||
"\n",
|
||||
"Langchain and Bitcoin have very different origin stories. Bitcoin was created by the mysterious Satoshi Nakamoto in 2008 as a decentralized digital currency. Langchain, on the other hand, was created in 2020 by a team of developers as a platform for creating and managing decentralized language learning applications. \n",
|
||||
"\n",
|
||||
"FINISHED\n",
|
||||
"\n",
|
||||
"The question to which the answer is the term/entity/phrase \" developers as a platform for creating and managing decentralized language learning applications.\" is:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\u001b[33;1m\u001b[1;3mGenerated Questions: ['How would you describe the origin stories of Langchain and Bitcoin in terms of their similarities or differences?', 'When was Langchain created and by whom?', 'What was the purpose of creating Langchain?']\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new _OpenAIResponseChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mRespond to the user message using any relevant context. If context is provided, you should ground your answer in that context. Once you're done responding return FINISHED.\n",
|
||||
"\n",
|
||||
">>> CONTEXT: Bitcoin and Ethereum have many similarities but different long-term visions and limitations. Ethereum changed from proof of work to proof of ... Bitcoin will be around for many years and examining its white paper origins is a great exercise in understanding why. Satoshi Nakamoto's blueprint describes ... Bitcoin is a new currency that was created in 2009 by an unknown person using the alias Satoshi Nakamoto. Transactions are made with no middle men – meaning, no ... Missing: Langchain | Must include:Langchain. By comparison, Bitcoin transaction speeds are tremendously lower. ... learn about its history and its role in the emergence of the Bitcoin ... LangChain is a powerful framework that simplifies the process of ... tasks like document retrieval, clustering, and similarity comparisons. Key terms: Bitcoin System, Blockchain Technology, ... Furthermore, the research paper will discuss and compare the five payment. Blockchain first appeared in Nakamoto's Bitcoin white paper that describes a new decentralized cryptocurrency [1]. Bitcoin takes the blockchain technology ... Missing: stories | Must include:stories. A score of 0 means there were not enough data for this term. Google trends was accessed on 5 November 2018 with searches for bitcoin, euro, gold ... Contracts, transactions, and records of them provide critical structure in our economic system, but they haven't kept up with the world's digital ... Missing: Langchain | Must include:Langchain. Of course, traders try to make a profit on their portfolio in this way.The difference between investing and trading is the regularity with which ...\n",
|
||||
"\n",
|
||||
"After all these giant leaps forward in the LLM space, OpenAI released ChatGPT — thrusting LLMs into the spotlight. LangChain appeared around the same time. Its creator, Harrison Chase, made the first commit in late October 2022. Leaving a short couple of months of development before getting caught in the LLM wave.\n",
|
||||
"\n",
|
||||
"At its core, LangChain is a framework built around LLMs. We can use it for chatbots, Generative Question-Answering (GQA), summarization, and much more. The core idea of the library is that we can “chain” together different components to create more advanced use cases around LLMs.\n",
|
||||
">>> USER INPUT: how are the origin stories of langchain and bitcoin similar or different?\n",
|
||||
">>> RESPONSE: \u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' The origin stories of LangChain and Bitcoin are quite different. Bitcoin was created in 2009 by an unknown person using the alias Satoshi Nakamoto. LangChain was created in late October 2022 by Harrison Chase. Bitcoin is a decentralized cryptocurrency, while LangChain is a framework built around LLMs. '"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"flare.run(\"how are the origin stories of langchain and bitcoin similar or different?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fbadd022",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,993 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e9732067-71c7-46f7-ad09-381b3bf21a27",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Generative Agents in LangChain\n",
|
||||
"\n",
|
||||
"This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et. al.\n",
|
||||
"\n",
|
||||
"In it, we leverage a time-weighted Memory object backed by a LangChain Retriever."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "53f81c37-db45-4fdc-843c-aa8fd2a9e99d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Use termcolor to make it easy to colorize the outputs.\n",
|
||||
"!pip install termcolor > /dev/null"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "3128fc21",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"\n",
|
||||
"logging.basicConfig(level=logging.ERROR)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8851c370-b395-4b80-a79d-486a38ffc244",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import datetime, timedelta\n",
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain.docstore import InMemoryDocstore\n",
|
||||
"from langchain.retrievers import TimeWeightedVectorStoreRetriever\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"from termcolor import colored"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "81824e76",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"USER_NAME = \"Person A\" # The name you want to use when interviewing the agent.\n",
|
||||
"LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3da1649-d88f-4973-b655-7042975cde7e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generative Agent Memory Components\n",
|
||||
"\n",
|
||||
"This tutorial highlights the memory of generative agents and its impact on their behavior. The memory varies from standard LangChain Chat memory in two aspects:\n",
|
||||
"\n",
|
||||
"1. **Memory Formation**\n",
|
||||
"\n",
|
||||
" Generative Agents have extended memories, stored in a single stream:\n",
|
||||
" 1. Observations - from dialogues or interactions with the virtual world, about self or others\n",
|
||||
" 2. Reflections - resurfaced and summarized core memories\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"2. **Memory Recall**\n",
|
||||
"\n",
|
||||
" Memories are retrieved using a weighted sum of salience, recency, and importance.\n",
|
||||
"\n",
|
||||
"You can review the definitions of the `GenerativeAgent` and `GenerativeAgentMemory` in the [reference documentation](\"https://api.python.langchain.com/en/latest/modules/experimental.html\") for the following imports, focusing on `add_memory` and `summarize_related_memories` methods."
|
||||
]
|
||||
},
|
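{
"cell_type": "markdown",
"id": "memory-recall-note",
"metadata": {},
"source": [
"A minimal sketch (not from the original notebook) of that weighted recall: `TimeWeightedVectorStoreRetriever` combines semantic similarity, a recency term of the form `(1 - decay_rate) ** hours_passed`, and any `other_score_keys` such as importance. The exact weighting may differ across versions; the function below is illustrative only."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "memory-recall-sketch",
"metadata": {},
"outputs": [],
"source": [
"def combined_recall_score(similarity, hours_passed, importance, decay_rate=0.01):\n",
"    \"\"\"Illustrative additive score: salience + recency + importance.\"\"\"\n",
"    recency = (1.0 - decay_rate) ** hours_passed\n",
"    return similarity + recency + importance\n",
"\n",
"\n",
"# A salient but three-day-old memory vs. a weaker but fresh one:\n",
"old = combined_recall_score(0.9, hours_passed=72, importance=0.8)\n",
"new = combined_recall_score(0.5, hours_passed=1, importance=0.3)\n",
"old, new"
]
},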
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "043e5203-6a41-431c-9efa-3e1743d7d25a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.generative_agents import (\n",
|
||||
" GenerativeAgent,\n",
|
||||
" GenerativeAgentMemory,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "361bd49e",
|
||||
"metadata": {
|
||||
"jp-MarkdownHeadingCollapsed": true,
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Memory Lifecycle\n",
|
||||
"\n",
|
||||
"Summarizing the key methods in the above: `add_memory` and `summarize_related_memories`.\n",
|
||||
"\n",
|
||||
"When an agent makes an observation, it stores the memory:\n",
|
||||
" \n",
|
||||
"1. Language model scores the memory's importance (1 for mundane, 10 for poignant)\n",
|
||||
"2. Observation and importance are stored within a document by TimeWeightedVectorStoreRetriever, with a `last_accessed_time`.\n",
|
||||
"\n",
|
||||
"When an agent responds to an observation:\n",
|
||||
"\n",
|
||||
"1. Generates query(s) for retriever, which fetches documents based on salience, recency, and importance.\n",
|
||||
"2. Summarizes the retrieved information\n",
|
||||
"3. Updates the `last_accessed_time` for the used documents.\n"
|
||||
]
|
||||
},
|
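{
"cell_type": "markdown",
"id": "memory-lifecycle-note",
"metadata": {},
"source": [
"The next cell is a rough sketch of that lifecycle, not part of the original notebook: it stores one observation together with its importance score, roughly the way `GenerativeAgentMemory` does internally. The function name `add_memory_sketch` is hypothetical, and the real retriever stamps `last_accessed_time` on the document itself."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "memory-lifecycle-sketch",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"\n",
"\n",
"def add_memory_sketch(retriever, text, importance):\n",
"    \"\"\"Store one observation with its importance score (illustrative only).\n",
"\n",
"    In the real flow, an LLM scores importance from 1 (mundane) to 10\n",
"    (poignant) before the document is added.\n",
"    \"\"\"\n",
"    doc = Document(page_content=text, metadata={\"importance\": importance})\n",
"    retriever.add_documents([doc])"
]
},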
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2fa3ca02",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a Generative Character\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Now that we've walked through the definition, we will create two characters named \"Tommie\" and \"Eve\"."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ee9c1a1d-c311-4f1c-8131-75fccd9025b1",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import math\n",
|
||||
"\n",
|
||||
"import faiss\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def relevance_score_fn(score: float) -> float:\n",
|
||||
" \"\"\"Return a similarity score on a scale [0, 1].\"\"\"\n",
|
||||
" # This will differ depending on a few things:\n",
|
||||
" # - the distance / similarity metric used by the VectorStore\n",
|
||||
" # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)\n",
|
||||
" # This function converts the euclidean norm of normalized embeddings\n",
|
||||
" # (0 is most similar, sqrt(2) most dissimilar)\n",
|
||||
" # to a similarity function (0 to 1)\n",
|
||||
" return 1.0 - score / math.sqrt(2)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def create_new_memory_retriever():\n",
|
||||
" \"\"\"Create a new vector store retriever unique to the agent.\"\"\"\n",
|
||||
" # Define your embedding model\n",
|
||||
" embeddings_model = OpenAIEmbeddings()\n",
|
||||
" # Initialize the vectorstore as empty\n",
|
||||
" embedding_size = 1536\n",
|
||||
" index = faiss.IndexFlatL2(embedding_size)\n",
|
||||
" vectorstore = FAISS(\n",
|
||||
" embeddings_model.embed_query,\n",
|
||||
" index,\n",
|
||||
" InMemoryDocstore({}),\n",
|
||||
" {},\n",
|
||||
" relevance_score_fn=relevance_score_fn,\n",
|
||||
" )\n",
|
||||
" return TimeWeightedVectorStoreRetriever(\n",
|
||||
" vectorstore=vectorstore, other_score_keys=[\"importance\"], k=15\n",
|
||||
" )"
|
||||
]
|
||||
},
|
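{
"cell_type": "markdown",
"id": "relevance-score-check-note",
"metadata": {},
"source": [
"A quick sanity check, added here for illustration: per the comments above, an L2 distance of 0 between normalized embeddings should map to a similarity of 1.0, and a distance of sqrt(2) to 0.0."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "relevance-score-check",
"metadata": {},
"outputs": [],
"source": [
"# Distance 0 -> identical vectors; sqrt(2) -> maximally dissimilar under the\n",
"# convention used in relevance_score_fn above.\n",
"relevance_score_fn(0.0), relevance_score_fn(math.sqrt(2))"
]
},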
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7884f9dd-c597-4c27-8c77-1402c71bc2f8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tommies_memory = GenerativeAgentMemory(\n",
|
||||
" llm=LLM,\n",
|
||||
" memory_retriever=create_new_memory_retriever(),\n",
|
||||
" verbose=False,\n",
|
||||
" reflection_threshold=8, # we will give this a relatively low number to show how reflection works\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"tommie = GenerativeAgent(\n",
|
||||
" name=\"Tommie\",\n",
|
||||
" age=25,\n",
|
||||
" traits=\"anxious, likes design, talkative\", # You can add more persistent traits here\n",
|
||||
" status=\"looking for a job\", # When connected to a virtual world, we can have the characters update their status\n",
|
||||
" memory_retriever=create_new_memory_retriever(),\n",
|
||||
" llm=LLM,\n",
|
||||
" memory=tommies_memory,\n",
|
||||
")"
|
||||
]
|
||||
},
|
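{
"cell_type": "markdown",
"id": "reflection-threshold-note",
"metadata": {},
"source": [
"A brief aside, not in the original notebook: `reflection_threshold` is compared against the accumulated importance of recent memories; once it is exceeded, the agent pauses to reflect and stores higher-level insights as new memories. The sketch below illustrates only the trigger, with a hypothetical helper name."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "reflection-threshold-sketch",
"metadata": {},
"outputs": [],
"source": [
"def should_reflect(recent_importances, reflection_threshold=8):\n",
"    \"\"\"Hypothetical trigger: reflect once accumulated importance is high enough.\"\"\"\n",
"    return sum(recent_importances) > reflection_threshold\n",
"\n",
"\n",
"# Several mildly important memories can add up to trigger a reflection:\n",
"should_reflect([3, 2, 4])"
]
},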
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c524d529",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"No information about Tommie's core characteristics is provided in the given statements.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# The current \"Summary\" of a character can't be made because the agent hasn't made\n",
|
||||
"# any observations yet.\n",
|
||||
"print(tommie.get_summary())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "4be60979-d56e-4abf-a636-b34ffa8b7fba",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# We can add memories directly to the memory object\n",
|
||||
"tommie_observations = [\n",
|
||||
" \"Tommie remembers his dog, Bruno, from when he was a kid\",\n",
|
||||
" \"Tommie feels tired from driving so far\",\n",
|
||||
" \"Tommie sees the new home\",\n",
|
||||
" \"The new neighbors have a cat\",\n",
|
||||
" \"The road is noisy at night\",\n",
|
||||
" \"Tommie is hungry\",\n",
|
||||
" \"Tommie tries to get some rest.\",\n",
|
||||
"]\n",
|
||||
"for observation in tommie_observations:\n",
|
||||
" tommie.memory.add_memory(observation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "6992b48b-697f-4973-9560-142ef85357d7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"Tommie is a person who is observant of his surroundings, has a sentimental side, and experiences basic human needs such as hunger and the need for rest. He also tends to get tired easily and is affected by external factors such as noise from the road or a neighbor's pet.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.\n",
|
||||
"# We will see how this summary updates after more observations to create a more rich description.\n",
|
||||
"print(tommie.get_summary(force_refresh=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "40d39a32-838c-4a03-8b27-a52c76c402e7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Pre-Interview with Character\n",
|
||||
"\n",
|
||||
"Before sending our character on their way, let's ask them a few questions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "eaf125d8-f54c-4c5f-b6af-32789b1f7d3a",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def interview_agent(agent: GenerativeAgent, message: str) -> str:\n",
|
||||
" \"\"\"Help the notebook user interact with the agent.\"\"\"\n",
|
||||
" new_message = f\"{USER_NAME} says {message}\"\n",
|
||||
" return agent.generate_dialogue_response(new_message)[1]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "54024d41-6e83-4914-91e5-73140e2dd9c8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"I really enjoy design and being creative. I\\'ve been working on some personal projects lately. What about you, Person A? What do you like to do?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"What do you like to do?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "71e2e8cc-921e-4816-82f1-66962b2c1055",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"Well, I\\'m actually looking for a job right now, so hopefully I can find some job postings online and start applying. How about you, Person A? What\\'s on your schedule for today?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"What are you looking forward to doing today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "a2521ffc-7050-4ac3-9a18-4cccfc798c31",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"Honestly, I\\'m feeling pretty anxious about finding a job. It\\'s been a bit of a struggle lately, but I\\'m trying to stay positive and keep searching. How about you, Person A? What worries you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"What are you most worried about today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e509c468-f7cd-4d72-9f3a-f4aba28b1eea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step through the day's observations."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "154dee3d-bfe0-4828-b963-ed7e885799b3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Let's have Tommie start going through a day in the life.\n",
|
||||
"observations = [\n",
|
||||
" \"Tommie wakes up to the sound of a noisy construction site outside his window.\",\n",
|
||||
" \"Tommie gets out of bed and heads to the kitchen to make himself some coffee.\",\n",
|
||||
" \"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\",\n",
|
||||
" \"Tommie finally finds the filters and makes himself a cup of coffee.\",\n",
|
||||
" \"The coffee tastes bitter, and Tommie regrets not buying a better brand.\",\n",
|
||||
" \"Tommie checks his email and sees that he has no job offers yet.\",\n",
|
||||
" \"Tommie spends some time updating his resume and cover letter.\",\n",
|
||||
" \"Tommie heads out to explore the city and look for job openings.\",\n",
|
||||
" \"Tommie sees a sign for a job fair and decides to attend.\",\n",
|
||||
" \"The line to get in is long, and Tommie has to wait for an hour.\",\n",
|
||||
" \"Tommie meets several potential employers at the job fair but doesn't receive any offers.\",\n",
|
||||
" \"Tommie leaves the job fair feeling disappointed.\",\n",
|
||||
" \"Tommie stops by a local diner to grab some lunch.\",\n",
|
||||
" \"The service is slow, and Tommie has to wait for 30 minutes to get his food.\",\n",
|
||||
" \"Tommie overhears a conversation at the next table about a job opening.\",\n",
|
||||
" \"Tommie asks the diners about the job opening and gets some information about the company.\",\n",
|
||||
" \"Tommie decides to apply for the job and sends his resume and cover letter.\",\n",
|
||||
" \"Tommie continues his search for job openings and drops off his resume at several local businesses.\",\n",
|
||||
" \"Tommie takes a break from his job search to go for a walk in a nearby park.\",\n",
|
||||
" \"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.\",\n",
|
||||
" \"Tommie sees a group of people playing frisbee and decides to join in.\",\n",
|
||||
" \"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\",\n",
|
||||
" \"Tommie goes back to his apartment to rest for a bit.\",\n",
|
||||
" \"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\",\n",
|
||||
" \"Tommie starts to feel frustrated with his job search.\",\n",
|
||||
" \"Tommie calls his best friend to vent about his struggles.\",\n",
|
||||
" \"Tommie's friend offers some words of encouragement and tells him to keep trying.\",\n",
|
||||
" \"Tommie feels slightly better after talking to his friend.\",\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "238be49c-edb3-4e26-a2b6-98777ba8de86",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[32mTommie wakes up to the sound of a noisy construction site outside his window.\u001b[0m Tommie groans and covers his head with a pillow, trying to block out the noise.\n",
|
||||
"\u001b[32mTommie gets out of bed and heads to the kitchen to make himself some coffee.\u001b[0m Tommie stretches his arms and yawns before starting to make the coffee.\n",
|
||||
"\u001b[32mTommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\u001b[0m Tommie sighs in frustration and continues searching through the boxes.\n",
|
||||
"\u001b[32mTommie finally finds the filters and makes himself a cup of coffee.\u001b[0m Tommie takes a deep breath and enjoys the aroma of the fresh coffee.\n",
|
||||
"\u001b[32mThe coffee tastes bitter, and Tommie regrets not buying a better brand.\u001b[0m Tommie grimaces and sets the coffee mug aside.\n",
|
||||
"\u001b[32mTommie checks his email and sees that he has no job offers yet.\u001b[0m Tommie sighs and closes his laptop, feeling discouraged.\n",
|
||||
"\u001b[32mTommie spends some time updating his resume and cover letter.\u001b[0m Tommie nods, feeling satisfied with his progress.\n",
|
||||
"\u001b[32mTommie heads out to explore the city and look for job openings.\u001b[0m Tommie feels a surge of excitement and anticipation as he steps out into the city.\n",
|
||||
"\u001b[32mTommie sees a sign for a job fair and decides to attend.\u001b[0m Tommie feels hopeful and excited about the possibility of finding job opportunities at the job fair.\n",
|
||||
"\u001b[32mThe line to get in is long, and Tommie has to wait for an hour.\u001b[0m Tommie taps his foot impatiently and checks his phone for the time.\n",
|
||||
"\u001b[32mTommie meets several potential employers at the job fair but doesn't receive any offers.\u001b[0m Tommie feels disappointed and discouraged, but he remains determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie leaves the job fair feeling disappointed.\u001b[0m Tommie feels disappointed and discouraged, but he remains determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie stops by a local diner to grab some lunch.\u001b[0m Tommie feels relieved to take a break and satisfy his hunger.\n",
|
||||
"\u001b[32mThe service is slow, and Tommie has to wait for 30 minutes to get his food.\u001b[0m Tommie feels frustrated and impatient due to the slow service.\n",
|
||||
"\u001b[32mTommie overhears a conversation at the next table about a job opening.\u001b[0m Tommie feels a surge of hope and excitement at the possibility of a job opportunity but decides not to interfere with the conversation at the next table.\n",
|
||||
"\u001b[32mTommie asks the diners about the job opening and gets some information about the company.\u001b[0m Tommie said \"Excuse me, I couldn't help but overhear your conversation about the job opening. Could you give me some more information about the company?\"\n",
|
||||
"\u001b[32mTommie decides to apply for the job and sends his resume and cover letter.\u001b[0m Tommie feels hopeful and proud of himself for taking action towards finding a job.\n",
|
||||
"\u001b[32mTommie continues his search for job openings and drops off his resume at several local businesses.\u001b[0m Tommie feels hopeful and determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie takes a break from his job search to go for a walk in a nearby park.\u001b[0m Tommie feels refreshed and rejuvenated after taking a break in the park.\n",
|
||||
"\u001b[32mA dog approaches and licks Tommie's feet, and he pets it for a few minutes.\u001b[0m Tommie feels happy and enjoys the brief interaction with the dog.\n",
|
||||
"****************************************\n",
|
||||
"\u001b[34mAfter 20 observations, Tommie's summary is:\n",
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"Tommie is determined and hopeful in his search for job opportunities, despite encountering setbacks and disappointments. He is also able to take breaks and care for his physical needs, such as getting rest and satisfying his hunger. Tommie is nostalgic towards his past, as shown by his memory of his childhood dog. Overall, Tommie is a hardworking and resilient individual who remains focused on his goals.\u001b[0m\n",
|
||||
"****************************************\n",
|
||||
"\u001b[32mTommie sees a group of people playing frisbee and decides to join in.\u001b[0m Do nothing.\n",
|
||||
"\u001b[32mTommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\u001b[0m Tommie feels pain and puts a hand to his nose to check for any injury.\n",
|
||||
"\u001b[32mTommie goes back to his apartment to rest for a bit.\u001b[0m Tommie feels relieved to take a break and rest for a bit.\n",
|
||||
"\u001b[32mA raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\u001b[0m Tommie feels annoyed and frustrated at the mess caused by the raccoon.\n",
|
||||
"\u001b[32mTommie starts to feel frustrated with his job search.\u001b[0m Tommie feels discouraged but remains determined to keep searching for job opportunities.\n",
|
||||
"\u001b[32mTommie calls his best friend to vent about his struggles.\u001b[0m Tommie said \"Hey, can I talk to you for a bit? I'm feeling really frustrated with my job search.\"\n",
|
||||
"\u001b[32mTommie's friend offers some words of encouragement and tells him to keep trying.\u001b[0m Tommie said \"Thank you, I really appreciate your support and encouragement.\"\n",
|
||||
"\u001b[32mTommie feels slightly better after talking to his friend.\u001b[0m Tommie feels grateful for his friend's support.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve\n",
|
||||
"for i, observation in enumerate(observations):\n",
|
||||
" _, reaction = tommie.generate_reaction(observation)\n",
|
||||
" print(colored(observation, \"green\"), reaction)\n",
|
||||
" if ((i + 1) % 20) == 0:\n",
|
||||
" print(\"*\" * 40)\n",
|
||||
" print(\n",
|
||||
" colored(\n",
|
||||
" f\"After {i + 1} observations, Tommie's summary is:\\n{tommie.get_summary(force_refresh=True)}\",\n",
|
||||
" \"blue\",\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" print(\"*\" * 40)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd62a275-7290-43ca-aa0f-504f3a706d09",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Interview after the day"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "6336ab5d-3074-4831-951f-c9e2cba5dfb5",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"It\\'s been a bit of a rollercoaster, to be honest. I\\'ve had some setbacks in my job search, but I also had some good moments today, like sending out a few resumes and meeting some potential employers at a job fair. How about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"Tell me about how your day has been going\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "809ac906-69b7-4326-99ec-af638d32bb20",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"I really enjoy coffee, but sometimes I regret not buying a better brand. How about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"How do you feel about coffee?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "f733a431-19ea-421a-9101-ae2593a8c626",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"Oh, I had a dog named Bruno when I was a kid. He was a golden retriever and my best friend. I have so many fond memories of him.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"Tell me about your childhood dog!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c9261428-778a-4c0b-b725-bc9e91b71391",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Adding Multiple Characters\n",
|
||||
"\n",
|
||||
"Let's add a second character to have a conversation with Tommie. Feel free to configure different traits."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"id": "ec8bbe18-a021-419c-bf1f-23d34732cd99",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"eves_memory = GenerativeAgentMemory(\n",
|
||||
" llm=LLM,\n",
|
||||
" memory_retriever=create_new_memory_retriever(),\n",
|
||||
" verbose=False,\n",
|
||||
" reflection_threshold=5,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"eve = GenerativeAgent(\n",
|
||||
" name=\"Eve\",\n",
|
||||
" age=34,\n",
|
||||
" traits=\"curious, helpful\", # You can add more persistent traits here\n",
|
||||
" status=\"N/A\", # When connected to a virtual world, we can have the characters update their status\n",
|
||||
" llm=LLM,\n",
|
||||
" daily_summaries=[\n",
|
||||
" (\n",
|
||||
" \"Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie.\"\n",
|
||||
" )\n",
|
||||
" ],\n",
|
||||
" memory=eves_memory,\n",
|
||||
" verbose=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"id": "1e2745f5-e0da-4abd-98b4-830802ce6698",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"yesterday = (datetime.now() - timedelta(days=1)).strftime(\"%A %B %d\")\n",
|
||||
"eve_observations = [\n",
|
||||
" \"Eve wakes up and hear's the alarm\",\n",
|
||||
" \"Eve eats a boal of porridge\",\n",
|
||||
" \"Eve helps a coworker on a task\",\n",
|
||||
" \"Eve plays tennis with her friend Xu before going to work\",\n",
|
||||
" \"Eve overhears her colleague say something about Tommie being hard to work with\",\n",
|
||||
"]\n",
|
||||
"for observation in eve_observations:\n",
|
||||
" eve.memory.add_memory(observation)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "de4726e3-4bb1-47da-8fd9-f317a036fe0f",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Eve (age: 34)\n",
|
||||
"Innate traits: curious, helpful\n",
|
||||
"Eve is a helpful and active person who enjoys sports and takes care of her physical health. She is attentive to her surroundings, including her colleagues, and has good time management skills.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(eve.get_summary())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "837524e9-7f7e-4e9f-b610-f454062f5915",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Pre-conversation interviews\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Let's \"Interview\" Eve before she speaks with Tommie."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "6cda916d-800c-47bc-a7f9-6a2f19187472",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"I\\'m feeling pretty good, thanks for asking! Just trying to stay productive and make the most of the day. How about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 50,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"How are you feeling about today?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "448ae644-0a66-4eb2-a03a-319f36948b37",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"I don\\'t know much about Tommie, but I heard someone mention that they find them difficult to work with. Have you had any experiences working with Tommie?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"What do you know about Tommie?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 52,
|
||||
"id": "493fc5b8-8730-4ef8-9820-0f1769ce1691",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"That\\'s interesting. I don\\'t know much about Tommie\\'s work experience, but I would probably ask about his strengths and areas for improvement. What about you?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 52,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(\n",
|
||||
" eve,\n",
|
||||
" \"Tommie is looking to find a job. What are are some things you'd like to ask him?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"id": "4b46452a-6c54-4db2-9d87-18597f70fec8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"Sure, I can keep the conversation going and ask plenty of questions. I want to make sure Tommie feels comfortable and supported. Thanks for letting me know.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 53,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(\n",
|
||||
" eve,\n",
|
||||
" \"You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd780655-1d73-4fcb-a78d-79fd46a20636",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dialogue between Generative Agents\n",
|
||||
"\n",
|
||||
"Generative agents are much more complex when they interact with a virtual environment or with each other. Below, we run a simple conversation between Tommie and Eve."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 54,
|
||||
"id": "042ea271-4bf1-4247-9082-239a6fea43b8",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:\n",
|
||||
" \"\"\"Runs a conversation between agents.\"\"\"\n",
|
||||
" _, observation = agents[1].generate_reaction(initial_observation)\n",
|
||||
" print(observation)\n",
|
||||
" turns = 0\n",
|
||||
" while True:\n",
|
||||
" break_dialogue = False\n",
|
||||
" for agent in agents:\n",
|
||||
" stay_in_dialogue, observation = agent.generate_dialogue_response(\n",
|
||||
" observation\n",
|
||||
" )\n",
|
||||
" print(observation)\n",
|
||||
" # observation = f\"{agent.name} said {reaction}\"\n",
|
||||
" if not stay_in_dialogue:\n",
|
||||
" break_dialogue = True\n",
|
||||
" if break_dialogue:\n",
|
||||
" break\n",
|
||||
" turns += 1"
|
||||
]
|
||||
},
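{
"cell_type": "markdown",
"id": "run-conversation-turn-cap-note",
"metadata": {},
"source": [
"Note that `turns` above is incremented but never used to end the dialogue, so a conversation only stops once an agent chooses to leave it. A variant that also caps the number of round-robin turns might look like the sketch below (same agent API as above; the default cap of 20 is an arbitrary choice, not part of the original walkthrough)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "run-conversation-turn-cap",
"metadata": {},
"outputs": [],
"source": [
"def run_capped_conversation(\n",
"    agents: List[GenerativeAgent], initial_observation: str, max_turns: int = 20\n",
") -> None:\n",
"    \"\"\"Like run_conversation, but also stops after max_turns round-robin turns.\"\"\"\n",
"    _, observation = agents[1].generate_reaction(initial_observation)\n",
"    print(observation)\n",
"    turns = 0\n",
"    while turns < max_turns:\n",
"        break_dialogue = False\n",
"        for agent in agents:\n",
"            stay_in_dialogue, observation = agent.generate_dialogue_response(\n",
"                observation\n",
"            )\n",
"            print(observation)\n",
"            if not stay_in_dialogue:\n",
"                break_dialogue = True\n",
"        if break_dialogue:\n",
"            break\n",
"        turns += 1"
]
},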
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"id": "d5462b14-218e-4d85-b035-df57ea8e0f80",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Eve said \"Sure, Tommie. I'd be happy to share about my experience. Where would you like me to start?\"\n",
|
||||
"Tommie said \"That's great, thank you! How about you start by telling me about your previous work experience?\"\n",
|
||||
"Eve said \"Sure, I'd be happy to share my previous work experience with you. I've worked in a few different industries, including marketing and event planning. What specific questions do you have for me?\"\n",
|
||||
"Tommie said \"That's great to hear. Can you tell me more about your experience in event planning? I've always been interested in that field.\"\n",
|
||||
"Eve said \"Sure, I'd be happy to share about my experience in event planning. I've worked on a variety of events, from corporate conferences to weddings. One of the biggest challenges I faced was managing multiple vendors and ensuring everything ran smoothly on the day of the event. What specific questions do you have?\"\n",
|
||||
"Tommie said \"That sounds like a lot of responsibility! Can you tell me more about how you handled the challenges that came up during those events?\"\n",
|
||||
"Eve said \"Sure, Tommie. I'd be happy to share with you how I handled those challenges. One approach that worked well for me was to stay organized and create a detailed timeline for the event. This helped me keep track of all the different tasks that needed to be done and when they needed to be completed. I also made sure to communicate clearly with all the vendors and team members involved in the event to ensure everyone was on the same page. Would you like me to go into more detail?\"\n",
|
||||
"Tommie said \"Thank you for sharing that with me, Eve. That sounds like a great approach to managing events. Can you tell me more about how you handled any unexpected issues that came up during the events?\"\n",
|
||||
"Eve said \"Of course, Tommie. One example of an unexpected issue I faced was when one of the vendors didn't show up on time. To handle this, I quickly contacted a backup vendor and was able to get everything back on track. It's always important to have a backup plan in case things don't go as planned. Do you have any other questions about event planning?\"\n",
|
||||
"Tommie said \"Thank you for sharing that with me, Eve. It's really helpful to hear how you handled unexpected issues like that. Can you give me an example of how you communicated with your team to ensure everyone was on the same page during an event?\"\n",
|
||||
"Eve said \"Sure, Tommie. One thing I did to ensure everyone was on the same page was to have regular check-ins and meetings with the team leading up to the event. This helped us address any issues or concerns early on and make sure everyone was clear on their roles and responsibilities. Have you ever had to manage a team for an event before?\"\n",
|
||||
"Tommie said \"That's a great idea, Eve. I haven't had the opportunity to manage a team for an event yet, but I'll definitely keep that in mind for the future. Thank you for sharing your experience with me.\"\n",
|
||||
"Eve said \"Thanks for the opportunity to share my experience, Tommie. It was great meeting with you today.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agents = [tommie, eve]\n",
|
||||
"run_conversation(\n",
|
||||
" agents,\n",
|
||||
" \"Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b28fe80-03dc-4399-961d-6e9ee1980216",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Let's interview our agents after their conversation\n",
|
||||
"\n",
|
||||
"Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memoreis."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"id": "c4d252f3-fcc1-474c-846e-a7605a6b4ce7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Tommie (age: 25)\n",
|
||||
"Innate traits: anxious, likes design, talkative\n",
|
||||
"Tommie is determined and hopeful in his job search, but can also feel discouraged and frustrated at times. He has a strong connection to his childhood dog, Bruno. Tommie seeks support from his friends when feeling overwhelmed and is grateful for their help. He also enjoys exploring his new city.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can see a current \"Summary\" of a character based on their own perception of self\n",
|
||||
"# has changed\n",
|
||||
"print(tommie.get_summary(force_refresh=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "c04db9a4",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Name: Eve (age: 34)\n",
|
||||
"Innate traits: curious, helpful\n",
|
||||
"Eve is a helpful and friendly person who enjoys playing sports and staying productive. She is attentive and responsive to others' needs, actively listening and asking questions to understand their perspectives. Eve has experience in event planning and communication, and is willing to share her knowledge and expertise with others. She values teamwork and collaboration, and strives to create a comfortable and supportive environment for everyone.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(eve.get_summary(force_refresh=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "71762558-8fb6-44d7-8483-f5b47fb2a862",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tommie said \"It was really helpful actually. Eve shared some great tips on managing events and handling unexpected issues. I feel like I learned a lot from her experience.\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 58,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(tommie, \"How was your conversation with Eve?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "085af3d8-ac21-41ea-8f8b-055c56976a67",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"It was great, thanks for asking. Tommie was very receptive and had some great questions about event planning. How about you, have you had any interactions with Tommie?\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 59,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"How was your conversation with Tommie?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"id": "5b439f3c-7849-4432-a697-2bcc85b89dae",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Eve said \"It was great meeting with you, Tommie. If you have any more questions or need any help in the future, don\\'t hesitate to reach out to me. Have a great day!\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 60,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"interview_agent(eve, \"What do you wish you would have said to Tommie?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,239 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4b089493",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Simulated Environment: Gymnasium\n",
|
||||
"\n",
|
||||
"For many applications of LLM agents, the environment is real (internet, database, REPL, etc). However, we can also define agents to interact in simulated environments like text-based games. This is an example of how to create a simple agent-environment interaction loop with [Gymnasium](https://github.com/Farama-Foundation/Gymnasium) (formerly [OpenAI Gym](https://github.com/openai/gym))."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f36427cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install gymnasium"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f9bd38b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tenacity\n",
|
||||
"from langchain.output_parsers import RegexParser\n",
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e222e811",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Define the agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "870c24bc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class GymnasiumAgent:\n",
|
||||
" @classmethod\n",
|
||||
" def get_docs(cls, env):\n",
|
||||
" return env.unwrapped.__doc__\n",
|
||||
"\n",
|
||||
" def __init__(self, model, env):\n",
|
||||
" self.model = model\n",
|
||||
" self.env = env\n",
|
||||
" self.docs = self.get_docs(env)\n",
|
||||
"\n",
|
||||
" self.instructions = \"\"\"\n",
|
||||
"Your goal is to maximize your return, i.e. the sum of the rewards you receive.\n",
|
||||
"I will give you an observation, reward, terminiation flag, truncation flag, and the return so far, formatted as:\n",
|
||||
"\n",
|
||||
"Observation: <observation>\n",
|
||||
"Reward: <reward>\n",
|
||||
"Termination: <termination>\n",
|
||||
"Truncation: <truncation>\n",
|
||||
"Return: <sum_of_rewards>\n",
|
||||
"\n",
|
||||
"You will respond with an action, formatted as:\n",
|
||||
"\n",
|
||||
"Action: <action>\n",
|
||||
"\n",
|
||||
"where you replace <action> with your actual action.\n",
|
||||
"Do nothing else but return the action.\n",
|
||||
"\"\"\"\n",
|
||||
" self.action_parser = RegexParser(\n",
|
||||
" regex=r\"Action: (.*)\", output_keys=[\"action\"], default_output_key=\"action\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" self.message_history = []\n",
|
||||
" self.ret = 0\n",
|
||||
"\n",
|
||||
" def random_action(self):\n",
|
||||
" action = self.env.action_space.sample()\n",
|
||||
" return action\n",
|
||||
"\n",
|
||||
" def reset(self):\n",
|
||||
" self.message_history = [\n",
|
||||
" SystemMessage(content=self.docs),\n",
|
||||
" SystemMessage(content=self.instructions),\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" def observe(self, obs, rew=0, term=False, trunc=False, info=None):\n",
|
||||
" self.ret += rew\n",
|
||||
"\n",
|
||||
" obs_message = f\"\"\"\n",
|
||||
"Observation: {obs}\n",
|
||||
"Reward: {rew}\n",
|
||||
"Termination: {term}\n",
|
||||
"Truncation: {trunc}\n",
|
||||
"Return: {self.ret}\n",
|
||||
" \"\"\"\n",
|
||||
" self.message_history.append(HumanMessage(content=obs_message))\n",
|
||||
" return obs_message\n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model.invoke(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
"\n",
|
||||
" def act(self):\n",
|
||||
" try:\n",
|
||||
" for attempt in tenacity.Retrying(\n",
|
||||
" stop=tenacity.stop_after_attempt(2),\n",
|
||||
" wait=tenacity.wait_none(), # No waiting time between retries\n",
|
||||
" retry=tenacity.retry_if_exception_type(ValueError),\n",
|
||||
" before_sleep=lambda retry_state: print(\n",
|
||||
" f\"ValueError occurred: {retry_state.outcome.exception()}, retrying...\"\n",
|
||||
" ),\n",
|
||||
" ):\n",
|
||||
" with attempt:\n",
|
||||
" action = self._act()\n",
|
||||
" except tenacity.RetryError:\n",
|
||||
" action = self.random_action()\n",
|
||||
" return action"
|
||||
]
|
||||
},
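{
"cell_type": "markdown",
"id": "action-parser-check-note",
"metadata": {},
"source": [
"As a quick sanity check (not part of the original walkthrough), the `RegexParser` configured above pulls the action string out of a reply of the form `Action: <action>`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "action-parser-check",
"metadata": {},
"outputs": [],
"source": [
"parser = RegexParser(\n",
"    regex=r\"Action: (.*)\", output_keys=[\"action\"], default_output_key=\"action\"\n",
")\n",
"parser.parse(\"Action: 1\")  # -> {'action': '1'}"
]
},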
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e76d22c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialize the simulated environment and agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "9e902cfd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"env = gym.make(\"Blackjack-v1\")\n",
|
||||
"agent = GymnasiumAgent(model=ChatOpenAI(temperature=0.2), env=env)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e2c12b15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Main loop"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ad361210",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"Observation: (15, 4, 0)\n",
|
||||
"Reward: 0\n",
|
||||
"Termination: False\n",
|
||||
"Truncation: False\n",
|
||||
"Return: 0\n",
|
||||
" \n",
|
||||
"Action: 1\n",
|
||||
"\n",
|
||||
"Observation: (25, 4, 0)\n",
|
||||
"Reward: -1.0\n",
|
||||
"Termination: True\n",
|
||||
"Truncation: False\n",
|
||||
"Return: -1.0\n",
|
||||
" \n",
|
||||
"break True False\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"observation, info = env.reset()\n",
|
||||
"agent.reset()\n",
|
||||
"\n",
|
||||
"obs_message = agent.observe(observation)\n",
|
||||
"print(obs_message)\n",
|
||||
"\n",
|
||||
"while True:\n",
|
||||
" action = agent.act()\n",
|
||||
" observation, reward, termination, truncation, info = env.step(action)\n",
|
||||
" obs_message = agent.observe(observation, reward, termination, truncation, info)\n",
|
||||
" print(f\"Action: {action}\")\n",
|
||||
" print(obs_message)\n",
|
||||
"\n",
|
||||
" if termination or truncation:\n",
|
||||
" print(\"break\", termination, truncation)\n",
|
||||
" break\n",
|
||||
"env.close()"
|
||||
]
|
||||
},
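{
"cell_type": "markdown",
"id": "multi-episode-note",
"metadata": {},
"source": [
"A single Blackjack episode is very short, so one run tells us little about the agent. A rough estimate of its average return can be obtained by repeating the same loop over several episodes, as in the sketch below (the episode count of 5 is arbitrary; note that `agent.reset()` only clears the message history, so we zero `agent.ret` by hand)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "multi-episode-loop",
"metadata": {},
"outputs": [],
"source": [
"env = gym.make(\"Blackjack-v1\")  # recreate, since env.close() was called above\n",
"episode_returns = []\n",
"for _ in range(5):\n",
"    observation, info = env.reset()\n",
"    agent.reset()\n",
"    agent.ret = 0  # reset() only clears the message history, not the running return\n",
"    agent.observe(observation)\n",
"    while True:\n",
"        action = agent.act()\n",
"        observation, reward, termination, truncation, info = env.step(action)\n",
"        agent.observe(observation, reward, termination, truncation, info)\n",
"        if termination or truncation:\n",
"            break\n",
"    episode_returns.append(agent.ret)\n",
"print(f\"Average return over {len(episode_returns)} episodes: {sum(episode_returns) / len(episode_returns):.2f}\")"
]
},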
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "58a13e9c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# HuggingGPT\n",
|
||||
"Implementation of [HuggingGPT](https://github.com/microsoft/JARVIS). HuggingGPT is a system to connect LLMs (ChatGPT) with ML community (Hugging Face).\n",
|
||||
"\n",
|
||||
"+ 🔥 Paper: https://arxiv.org/abs/2303.17580\n",
|
||||
"+ 🚀 Project: https://github.com/microsoft/JARVIS\n",
|
||||
"+ 🤗 Space: https://huggingface.co/spaces/microsoft/HuggingGPT"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up tools\n",
|
||||
"\n",
|
||||
"We set up the tools available from [Transformers Agent](https://huggingface.co/docs/transformers/transformers_agents#tools). It includes a library of tools supported by Transformers and some customized tools such as image generator, video generator, text downloader and other tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from transformers import load_tool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"hf_tools = [\n",
|
||||
" load_tool(tool_name)\n",
|
||||
" for tool_name in [\n",
|
||||
" \"document-question-answering\",\n",
|
||||
" \"image-captioning\",\n",
|
||||
" \"image-question-answering\",\n",
|
||||
" \"image-segmentation\",\n",
|
||||
" \"speech-to-text\",\n",
|
||||
" \"summarization\",\n",
|
||||
" \"text-classification\",\n",
|
||||
" \"text-question-answering\",\n",
|
||||
" \"translation\",\n",
|
||||
" \"huggingface-tools/text-to-image\",\n",
|
||||
" \"huggingface-tools/text-to-video\",\n",
|
||||
" \"text-to-speech\",\n",
|
||||
" \"huggingface-tools/text-download\",\n",
|
||||
" \"huggingface-tools/image-transformation\",\n",
|
||||
" ]\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup model and HuggingGPT\n",
|
||||
"\n",
|
||||
"We create an instance of HuggingGPT and use ChatGPT as the controller to rule the above tools."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.autonomous_agents import HuggingGPT\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"# %env OPENAI_API_BASE=http://localhost:8000/v1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(model_name=\"gpt-3.5-turbo\")\n",
|
||||
"agent = HuggingGPT(llm, hf_tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run an example\n",
|
||||
"\n",
|
||||
"Given a text, show a related image and video."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent.run(\"please show me a video and an image of 'a boy is running'\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.17"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,325 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "144e77fe",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Human-in-the-loop Tool Validation\n",
|
||||
"\n",
|
||||
"This walkthrough demonstrates how to add human validation to any Tool. We'll do this using the `HumanApprovalCallbackhandler`.\n",
|
||||
"\n",
|
||||
"Let's suppose we need to make use of the `ShellTool`. Adding this tool to an automated flow poses obvious risks. Let's see how we could enforce manual human approval of inputs going into this tool.\n",
|
||||
"\n",
|
||||
"**Note**: We generally recommend against using the `ShellTool`. There's a lot of ways to misuse it, and it's not required for most use cases. We employ it here only for demonstration purposes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ad84c682",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import HumanApprovalCallbackHandler\n",
|
||||
"from langchain.tools import ShellTool"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "70090dd6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool = ShellTool()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "20d5175f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello World!\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(tool.run(\"echo Hello World!\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e0475dd6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Adding Human Approval\n",
|
||||
"Adding the default `HumanApprovalCallbackHandler` to the tool will make it so that a user has to manually approve every input to the tool before the command is actually executed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "f1c88793",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tool = ShellTool(callbacks=[HumanApprovalCallbackHandler()])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "f749815d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Do you approve of the following input? Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
|
||||
"\n",
|
||||
"ls /usr\n",
|
||||
"yes\n",
|
||||
"\u001b[35mX11\u001b[m\u001b[m\n",
|
||||
"\u001b[35mX11R6\u001b[m\u001b[m\n",
|
||||
"\u001b[1m\u001b[36mbin\u001b[m\u001b[m\n",
|
||||
"\u001b[1m\u001b[36mlib\u001b[m\u001b[m\n",
|
||||
"\u001b[1m\u001b[36mlibexec\u001b[m\u001b[m\n",
|
||||
"\u001b[1m\u001b[36mlocal\u001b[m\u001b[m\n",
|
||||
"\u001b[1m\u001b[36msbin\u001b[m\u001b[m\n",
|
||||
"\u001b[1m\u001b[36mshare\u001b[m\u001b[m\n",
|
||||
"\u001b[1m\u001b[36mstandalone\u001b[m\u001b[m\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(tool.run(\"ls /usr\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "b6e455d1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Do you approve of the following input? Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
|
||||
"\n",
|
||||
"ls /private\n",
|
||||
"no\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "HumanRejectedException",
|
||||
"evalue": "Inputs ls /private to tool {'name': 'terminal', 'description': 'Run shell commands on this MacOS machine.'} were rejected.",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mHumanRejectedException\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[17], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mtool\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mls /private\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m)\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/tools/base.py:257\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, **kwargs)\u001b[0m\n\u001b[1;32m 255\u001b[0m \u001b[38;5;66;03m# TODO: maybe also pass through run_manager is _run supports kwargs\u001b[39;00m\n\u001b[1;32m 256\u001b[0m new_arg_supported \u001b[38;5;241m=\u001b[39m signature(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 257\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m \u001b[43mcallback_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mon_tool_start\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 258\u001b[0m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mname\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mdescription\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdescription\u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 259\u001b[0m \u001b[43m \u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43misinstance\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 260\u001b[0m \u001b[43m \u001b[49m\u001b[43mcolor\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstart_color\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 261\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 262\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 264\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/manager.py:672\u001b[0m, in \u001b[0;36mCallbackManager.on_tool_start\u001b[0;34m(self, serialized, input_str, run_id, parent_run_id, **kwargs)\u001b[0m\n\u001b[1;32m 669\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_id \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 670\u001b[0m run_id \u001b[38;5;241m=\u001b[39m uuid4()\n\u001b[0;32m--> 672\u001b[0m \u001b[43m_handle_event\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 673\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhandlers\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 674\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mon_tool_start\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 675\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mignore_agent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 676\u001b[0m \u001b[43m \u001b[49m\u001b[43mserialized\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 677\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_str\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 678\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 679\u001b[0m \u001b[43m \u001b[49m\u001b[43mparent_run_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparent_run_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 680\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 681\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 683\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m CallbackManagerForToolRun(\n\u001b[1;32m 684\u001b[0m run_id, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandlers, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minheritable_handlers, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparent_run_id\n\u001b[1;32m 685\u001b[0m )\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/manager.py:157\u001b[0m, in \u001b[0;36m_handle_event\u001b[0;34m(handlers, event_name, ignore_condition_name, *args, **kwargs)\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 156\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m handler\u001b[38;5;241m.\u001b[39mraise_error:\n\u001b[0;32m--> 157\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 158\u001b[0m logging\u001b[38;5;241m.\u001b[39mwarning(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError in \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mevent_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m callback: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/manager.py:139\u001b[0m, in \u001b[0;36m_handle_event\u001b[0;34m(handlers, event_name, ignore_condition_name, *args, **kwargs)\u001b[0m\n\u001b[1;32m 135\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 136\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ignore_condition_name \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mgetattr\u001b[39m(\n\u001b[1;32m 137\u001b[0m handler, ignore_condition_name\n\u001b[1;32m 138\u001b[0m ):\n\u001b[0;32m--> 139\u001b[0m \u001b[38;5;28;43mgetattr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mhandler\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mevent_name\u001b[49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 140\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 141\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m event_name \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mon_chat_model_start\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/human.py:48\u001b[0m, in \u001b[0;36mHumanApprovalCallbackHandler.on_tool_start\u001b[0;34m(self, serialized, input_str, run_id, parent_run_id, **kwargs)\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mon_tool_start\u001b[39m(\n\u001b[1;32m 39\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 40\u001b[0m serialized: Dict[\u001b[38;5;28mstr\u001b[39m, Any],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 46\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 47\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_check(serialized) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_approve(input_str):\n\u001b[0;32m---> 48\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HumanRejectedException(\n\u001b[1;32m 49\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInputs \u001b[39m\u001b[38;5;132;01m{\u001b[39;00minput_str\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m to tool \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mserialized\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m were rejected.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 50\u001b[0m )\n",
|
||||
"\u001b[0;31mHumanRejectedException\u001b[0m: Inputs ls /private to tool {'name': 'terminal', 'description': 'Run shell commands on this MacOS machine.'} were rejected."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(tool.run(\"ls /private\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a3b092ec",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Configuring Human Approval\n",
|
||||
"\n",
|
||||
"Let's suppose we have an agent that takes in multiple tools, and we want it to only trigger human approval requests on certain tools and certain inputs. We can configure out callback handler to do just this."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4521c581",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"id": "9e8d5428",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def _should_check(serialized_obj: dict) -> bool:\n",
|
||||
" # Only require approval on ShellTool.\n",
|
||||
" return serialized_obj.get(\"name\") == \"terminal\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _approve(_input: str) -> bool:\n",
|
||||
" if _input == \"echo 'Hello World'\":\n",
|
||||
" return True\n",
|
||||
" msg = (\n",
|
||||
" \"Do you approve of the following input? \"\n",
|
||||
" \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\"\n",
|
||||
" )\n",
|
||||
" msg += \"\\n\\n\" + _input + \"\\n\"\n",
|
||||
" resp = input(msg)\n",
|
||||
" return resp.lower() in (\"yes\", \"y\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"callbacks = [HumanApprovalCallbackHandler(should_check=_should_check, approve=_approve)]"
|
||||
]
|
||||
},
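{
"cell_type": "markdown",
"id": "approve-logic-check-note",
"metadata": {},
"source": [
"As a quick check of the logic above (not in the original notebook): `_should_check` gates approval to the terminal tool only, and `_approve` short-circuits on the exact string `echo 'Hello World'` without prompting."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "approve-logic-check",
"metadata": {},
"outputs": [],
"source": [
"assert _should_check({\"name\": \"terminal\"})\n",
"assert not _should_check({\"name\": \"wikipedia\"})\n",
"assert _approve(\"echo 'Hello World'\")  # auto-approved, no prompt shown"
]
},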
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 34,
|
||||
"id": "9922898e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"tools = load_tools([\"wikipedia\", \"llm-math\", \"terminal\"], llm=llm)\n",
|
||||
"agent = initialize_agent(\n",
|
||||
" tools,\n",
|
||||
" llm,\n",
|
||||
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "e69ea402",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Konrad Adenauer became Chancellor of Germany in 1949, 74 years ago.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 38,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\n",
|
||||
" \"It's 2023 now. How many years ago did Konrad Adenauer become Chancellor of Germany.\",\n",
|
||||
" callbacks=callbacks,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"id": "25182a7e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello World'"
|
||||
]
|
||||
},
|
||||
"execution_count": 36,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"print 'Hello World' in the terminal\", callbacks=callbacks)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "2f5a93d0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Do you approve of the following input? Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
|
||||
"\n",
|
||||
"ls /private\n",
|
||||
"no\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ename": "HumanRejectedException",
|
||||
"evalue": "Inputs ls /private to tool {'name': 'terminal', 'description': 'Run shell commands on this MacOS machine.'} were rejected.",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mHumanRejectedException\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[39], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlist all directories in /private\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/chains/base.py:236\u001b[0m, in \u001b[0;36mChain.run\u001b[0;34m(self, callbacks, *args, **kwargs)\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(args) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`run` supports only one positional argument.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 236\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys[\u001b[38;5;241m0\u001b[39m]]\n\u001b[1;32m 238\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m args:\n\u001b[1;32m 239\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m(kwargs, callbacks\u001b[38;5;241m=\u001b[39mcallbacks)[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys[\u001b[38;5;241m0\u001b[39m]]\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/chains/base.py:140\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks)\u001b[0m\n\u001b[1;32m 138\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 139\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n\u001b[0;32m--> 140\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 141\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_end(outputs)\n\u001b[1;32m 142\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprep_outputs(inputs, outputs, return_only_outputs)\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/chains/base.py:134\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs, callbacks)\u001b[0m\n\u001b[1;32m 128\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_chain_start(\n\u001b[1;32m 129\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m},\n\u001b[1;32m 130\u001b[0m inputs,\n\u001b[1;32m 131\u001b[0m )\n\u001b[1;32m 132\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 133\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 134\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 135\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 136\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call(inputs)\n\u001b[1;32m 137\u001b[0m )\n\u001b[1;32m 138\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (\u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m, \u001b[38;5;167;01mException\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 139\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/agents/agent.py:953\u001b[0m, in \u001b[0;36mAgentExecutor._call\u001b[0;34m(self, inputs, run_manager)\u001b[0m\n\u001b[1;32m 951\u001b[0m \u001b[38;5;66;03m# We now enter the agent loop (until it returns something).\u001b[39;00m\n\u001b[1;32m 952\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_continue(iterations, time_elapsed):\n\u001b[0;32m--> 953\u001b[0m next_step_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_take_next_step\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 954\u001b[0m \u001b[43m \u001b[49m\u001b[43mname_to_tool_map\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 955\u001b[0m \u001b[43m \u001b[49m\u001b[43mcolor_mapping\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 956\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 957\u001b[0m \u001b[43m \u001b[49m\u001b[43mintermediate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 958\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 959\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 960\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(next_step_output, AgentFinish):\n\u001b[1;32m 961\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_return(\n\u001b[1;32m 962\u001b[0m next_step_output, intermediate_steps, run_manager\u001b[38;5;241m=\u001b[39mrun_manager\n\u001b[1;32m 963\u001b[0m )\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/agents/agent.py:820\u001b[0m, in \u001b[0;36mAgentExecutor._take_next_step\u001b[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001b[0m\n\u001b[1;32m 818\u001b[0m tool_run_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mllm_prefix\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 819\u001b[0m \u001b[38;5;66;03m# We then call the tool on the tool input to get an observation\u001b[39;00m\n\u001b[0;32m--> 820\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[43mtool\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 821\u001b[0m \u001b[43m \u001b[49m\u001b[43magent_action\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtool_input\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 822\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mverbose\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 823\u001b[0m \u001b[43m \u001b[49m\u001b[43mcolor\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcolor\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 824\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 825\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtool_run_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 826\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 827\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 828\u001b[0m tool_run_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39magent\u001b[38;5;241m.\u001b[39mtool_run_logging_kwargs()\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/tools/base.py:257\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, **kwargs)\u001b[0m\n\u001b[1;32m 255\u001b[0m \u001b[38;5;66;03m# TODO: maybe also pass through run_manager is _run supports kwargs\u001b[39;00m\n\u001b[1;32m 256\u001b[0m new_arg_supported \u001b[38;5;241m=\u001b[39m signature(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 257\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m \u001b[43mcallback_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mon_tool_start\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 258\u001b[0m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mname\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mdescription\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdescription\u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 259\u001b[0m \u001b[43m \u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43misinstance\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 260\u001b[0m \u001b[43m \u001b[49m\u001b[43mcolor\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstart_color\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 261\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 262\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 264\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/manager.py:672\u001b[0m, in \u001b[0;36mCallbackManager.on_tool_start\u001b[0;34m(self, serialized, input_str, run_id, parent_run_id, **kwargs)\u001b[0m\n\u001b[1;32m 669\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_id \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 670\u001b[0m run_id \u001b[38;5;241m=\u001b[39m uuid4()\n\u001b[0;32m--> 672\u001b[0m \u001b[43m_handle_event\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 673\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhandlers\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 674\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mon_tool_start\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 675\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mignore_agent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 676\u001b[0m \u001b[43m \u001b[49m\u001b[43mserialized\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 677\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_str\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 678\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 679\u001b[0m \u001b[43m \u001b[49m\u001b[43mparent_run_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparent_run_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 680\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 681\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 683\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m CallbackManagerForToolRun(\n\u001b[1;32m 684\u001b[0m run_id, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandlers, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minheritable_handlers, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparent_run_id\n\u001b[1;32m 685\u001b[0m )\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/manager.py:157\u001b[0m, in \u001b[0;36m_handle_event\u001b[0;34m(handlers, event_name, ignore_condition_name, *args, **kwargs)\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 156\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m handler\u001b[38;5;241m.\u001b[39mraise_error:\n\u001b[0;32m--> 157\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 158\u001b[0m logging\u001b[38;5;241m.\u001b[39mwarning(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError in \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mevent_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m callback: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/manager.py:139\u001b[0m, in \u001b[0;36m_handle_event\u001b[0;34m(handlers, event_name, ignore_condition_name, *args, **kwargs)\u001b[0m\n\u001b[1;32m 135\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 136\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ignore_condition_name \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mgetattr\u001b[39m(\n\u001b[1;32m 137\u001b[0m handler, ignore_condition_name\n\u001b[1;32m 138\u001b[0m ):\n\u001b[0;32m--> 139\u001b[0m \u001b[38;5;28;43mgetattr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mhandler\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mevent_name\u001b[49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 140\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 141\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m event_name \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mon_chat_model_start\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
|
||||
"File \u001b[0;32m~/langchain/langchain/callbacks/human.py:48\u001b[0m, in \u001b[0;36mHumanApprovalCallbackHandler.on_tool_start\u001b[0;34m(self, serialized, input_str, run_id, parent_run_id, **kwargs)\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mon_tool_start\u001b[39m(\n\u001b[1;32m 39\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 40\u001b[0m serialized: Dict[\u001b[38;5;28mstr\u001b[39m, Any],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 46\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 47\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_check(serialized) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_approve(input_str):\n\u001b[0;32m---> 48\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HumanRejectedException(\n\u001b[1;32m 49\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInputs \u001b[39m\u001b[38;5;132;01m{\u001b[39;00minput_str\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m to tool \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mserialized\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m were rejected.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 50\u001b[0m )\n",
|
||||
"\u001b[0;31mHumanRejectedException\u001b[0m: Inputs ls /private to tool {'name': 'terminal', 'description': 'Run shell commands on this MacOS machine.'} were rejected."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"list all directories in /private\", callbacks=callbacks)"
|
||||
]
|
||||
},
|
||||
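The rejection above comes from the handler's default approval prompt. Below is a minimal sketch of customizing that gate; it assumes the `approve` and `should_check` constructor hooks implied by the `_approve`/`_should_check` calls visible in the traceback, so treat the parameter names as assumptions rather than confirmed API.

```python
from langchain.callbacks.human import HumanApprovalCallbackHandler


def _should_check(serialized_obj: dict) -> bool:
    # Only gate the terminal tool; let every other tool run unprompted.
    return serialized_obj.get("name") == "terminal"


def _approve(tool_input: str) -> bool:
    # Pre-approve a known-safe command; ask a human about everything else.
    if tool_input == "echo 'Hello World'":
        return True
    msg = "Do you approve of the following input? Anything but 'Y'/'Yes' (case-insensitive) counts as a no."
    resp = input(f"{msg}\n{tool_input}\n")
    return resp.lower() in ("yes", "y")


callbacks = [HumanApprovalCallbackHandler(should_check=_should_check, approve=_approve)]
```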
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c0b47e26",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "venv",
|
||||
"language": "python",
|
||||
"name": "venv"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,210 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Human input chat model\n",
|
||||
"\n",
|
||||
"Along with HumanInputLLM, LangChain also provides a pseudo chat model class that can be used for testing, debugging, or educational purposes. This allows you to mock out calls to the chat model and simulate how a human would respond if they received the messages.\n",
|
||||
"\n",
|
||||
"In this notebook, we go over how to use this.\n",
|
||||
"\n",
|
||||
"We start this with using the HumanInputChatModel in an agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.human import HumanInputChatModel"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Since we will use the `WikipediaQueryRun` tool in this notebook, you might need to install the `wikipedia` package if you haven't done so already."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/mskim58/dev/research/chatbot/github/langchain/.venv/bin/python: No module named pip\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = load_tools([\"wikipedia\"])\n",
|
||||
"llm = HumanInputChatModel()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: system\n",
|
||||
"data:\n",
|
||||
" content: \"Answer the following questions as best you can. You have access to the following tools:\\n\\nWikipedia: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.\\n\\nThe way you use the tools is by specifying a json blob.\\nSpecifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\\n\\nThe only values that should be in the \\\"action\\\" field are: Wikipedia\\n\\nThe $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:\\n\\n```\\n{\\n \\\"action\\\": $TOOL_NAME,\\n \\\"action_input\\\": $INPUT\\n}\\n```\\n\\nALWAYS use the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction:\\n```\\n$JSON_BLOB\\n```\\nObservation: the result of the action\\n... (this Thought/Action/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nBegin! Reminder to always use the exact characters `Final Answer` when responding.\"\n",
|
||||
" additional_kwargs: {}\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: human\n",
|
||||
"data:\n",
|
||||
" content: 'What is Bocchi the Rock?\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" '\n",
|
||||
" additional_kwargs: {}\n",
|
||||
" example: false\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[32;1m\u001b[1;3mAction:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"action\": \"Wikipedia\",\n",
|
||||
" \"action_input\": \"What is Bocchi the Rock?\"\n",
|
||||
"}\n",
|
||||
"```\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mPage: Bocchi the Rock!\n",
|
||||
"Summary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Botchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\n",
|
||||
"An anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\n",
|
||||
"\n",
|
||||
"Page: Hitori Bocchi no Marumaru Seikatsu\n",
|
||||
"Summary: Hitori Bocchi no Marumaru Seikatsu (Japanese: ひとりぼっちの○○生活, lit. \"Bocchi Hitori's ____ Life\" or \"The ____ Life of Being Alone\") is a Japanese yonkoma manga series written and illustrated by Katsuwo. It was serialized in ASCII Media Works' Comic Dengeki Daioh \"g\" magazine from September 2013 to April 2021. Eight tankōbon volumes have been released. An anime television series adaptation by C2C aired from April to June 2019.\n",
|
||||
"\n",
|
||||
"Page: Kessoku Band (album)\n",
|
||||
"Summary: Kessoku Band (Japanese: 結束バンド, Hepburn: Kessoku Bando) is the debut studio album by Kessoku Band, a fictional musical group from the anime television series Bocchi the Rock!, released digitally on December 25, 2022, and physically on CD on December 28 by Aniplex. Featuring vocals from voice actresses Yoshino Aoyama, Sayumi Suzushiro, Saku Mizuno, and Ikumi Hasegawa, the album consists of 14 tracks previously heard in the anime, including a cover of Asian Kung-Fu Generation's \"Rockn' Roll, Morning Light Falls on You\", as well as newly recorded songs; nine singles preceded the album's physical release. Commercially, Kessoku Band peaked at number one on the Billboard Japan Hot Albums Chart and Oricon Albums Chart, and was certified gold by the Recording Industry Association of Japan.\n",
|
||||
"\n",
|
||||
"\u001b[0m\n",
|
||||
"Thought:\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: system\n",
|
||||
"data:\n",
|
||||
" content: \"Answer the following questions as best you can. You have access to the following tools:\\n\\nWikipedia: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.\\n\\nThe way you use the tools is by specifying a json blob.\\nSpecifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).\\n\\nThe only values that should be in the \\\"action\\\" field are: Wikipedia\\n\\nThe $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:\\n\\n```\\n{\\n \\\"action\\\": $TOOL_NAME,\\n \\\"action_input\\\": $INPUT\\n}\\n```\\n\\nALWAYS use the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction:\\n```\\n$JSON_BLOB\\n```\\nObservation: the result of the action\\n... (this Thought/Action/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nBegin! Reminder to always use the exact characters `Final Answer` when responding.\"\n",
|
||||
" additional_kwargs: {}\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" ======= start of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"type: human\n",
|
||||
"data:\n",
|
||||
" content: \"What is Bocchi the Rock?\\n\\nThis was your previous work (but I haven't seen any of it! I only see what you return as final answer):\\nAction:\\n```\\n{\\n \\\"action\\\": \\\"Wikipedia\\\",\\n \\\"action_input\\\": \\\"What is Bocchi the Rock?\\\"\\n}\\n```\\nObservation: Page: Bocchi the Rock!\\nSummary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Botchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\\nAn anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\\n\\nPage: Hitori Bocchi no Marumaru Seikatsu\\nSummary: Hitori Bocchi no Marumaru Seikatsu (Japanese: ひとりぼっちの○○生活, lit. \\\"Bocchi Hitori's ____ Life\\\" or \\\"The ____ Life of Being Alone\\\") is a Japanese yonkoma manga series written and illustrated by Katsuwo. It was serialized in ASCII Media Works' Comic Dengeki Daioh \\\"g\\\" magazine from September 2013 to April 2021. Eight tankōbon volumes have been released. An anime television series adaptation by C2C aired from April to June 2019.\\n\\nPage: Kessoku Band (album)\\nSummary: Kessoku Band (Japanese: 結束バンド, Hepburn: Kessoku Bando) is the debut studio album by Kessoku Band, a fictional musical group from the anime television series Bocchi the Rock!, released digitally on December 25, 2022, and physically on CD on December 28 by Aniplex. Featuring vocals from voice actresses Yoshino Aoyama, Sayumi Suzushiro, Saku Mizuno, and Ikumi Hasegawa, the album consists of 14 tracks previously heard in the anime, including a cover of Asian Kung-Fu Generation's \\\"Rockn' Roll, Morning Light Falls on You\\\", as well as newly recorded songs; nine singles preceded the album's physical release. Commercially, Kessoku Band peaked at number one on the Billboard Japan Hot Albums Chart and Oricon Albums Chart, and was certified gold by the Recording Industry Association of Japan.\\n\\n\\nThought:\"\n",
|
||||
" additional_kwargs: {}\n",
|
||||
" example: false\n",
|
||||
"\n",
|
||||
"======= end of message ======= \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[32;1m\u001b[1;3mThis finally works.\n",
|
||||
"Final Answer: Bocchi the Rock! is a four-panel manga series and anime television series. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What is Bocchi the Rock?',\n",
|
||||
" 'output': \"Bocchi the Rock! is a four-panel manga series and anime television series. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent(\"What is Bocchi the Rock?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,249 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Human input LLM\n",
|
||||
"\n",
|
||||
"Similar to the fake LLM, LangChain provides a pseudo LLM class that can be used for testing, debugging, or educational purposes. This allows you to mock out calls to the LLM and simulate how a human would respond if they received the prompts.\n",
|
||||
"\n",
|
||||
"In this notebook, we go over how to use this.\n",
|
||||
"\n",
|
||||
"We start this with using the HumanInputLLM in an agent."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms.human import HumanInputLLM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Since we will use the `WikipediaQueryRun` tool in this notebook, you might need to install the `wikipedia` package if you haven't done so already."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = load_tools([\"wikipedia\"])\n",
|
||||
"llm = HumanInputLLM(\n",
|
||||
" prompt_func=lambda prompt: print(\n",
|
||||
" f\"\\n===PROMPT====\\n{prompt}\\n=====END OF PROMPT======\"\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"===PROMPT====\n",
|
||||
"Answer the following questions as best you can. You have access to the following tools:\n",
|
||||
"\n",
|
||||
"Wikipedia: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, historical events, or other subjects. Input should be a search query.\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: the input question you must answer\n",
|
||||
"Thought: you should always think about what to do\n",
|
||||
"Action: the action to take, should be one of [Wikipedia]\n",
|
||||
"Action Input: the input to the action\n",
|
||||
"Observation: the result of the action\n",
|
||||
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
|
||||
"Thought: I now know the final answer\n",
|
||||
"Final Answer: the final answer to the original input question\n",
|
||||
"\n",
|
||||
"Begin!\n",
|
||||
"\n",
|
||||
"Question: What is 'Bocchi the Rock!'?\n",
|
||||
"Thought:\n",
|
||||
"=====END OF PROMPT======\n",
|
||||
"\u001b[32;1m\u001b[1;3mI need to use a tool.\n",
|
||||
"Action: Wikipedia\n",
|
||||
"Action Input: Bocchi the Rock!, Japanese four-panel manga and anime series.\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mPage: Bocchi the Rock!\n",
|
||||
"Summary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Bocchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\n",
|
||||
"An anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\n",
|
||||
"\n",
|
||||
"Page: Manga Time Kirara\n",
|
||||
"Summary: Manga Time Kirara (まんがタイムきらら, Manga Taimu Kirara) is a Japanese seinen manga magazine published by Houbunsha which mainly serializes four-panel manga. The magazine is sold on the ninth of each month and was first published as a special edition of Manga Time, another Houbunsha magazine, on May 17, 2002. Characters from this magazine have appeared in a crossover role-playing game called Kirara Fantasia.\n",
|
||||
"\n",
|
||||
"Page: Manga Time Kirara Max\n",
|
||||
"Summary: Manga Time Kirara Max (まんがタイムきららMAX) is a Japanese four-panel seinen manga magazine published by Houbunsha. It is the third magazine of the \"Kirara\" series, after \"Manga Time Kirara\" and \"Manga Time Kirara Carat\". The first issue was released on September 29, 2004. Currently the magazine is released on the 19th of each month.\u001b[0m\n",
|
||||
"Thought:\n",
|
||||
"===PROMPT====\n",
|
||||
"Answer the following questions as best you can. You have access to the following tools:\n",
|
||||
"\n",
|
||||
"Wikipedia: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, historical events, or other subjects. Input should be a search query.\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: the input question you must answer\n",
|
||||
"Thought: you should always think about what to do\n",
|
||||
"Action: the action to take, should be one of [Wikipedia]\n",
|
||||
"Action Input: the input to the action\n",
|
||||
"Observation: the result of the action\n",
|
||||
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
|
||||
"Thought: I now know the final answer\n",
|
||||
"Final Answer: the final answer to the original input question\n",
|
||||
"\n",
|
||||
"Begin!\n",
|
||||
"\n",
|
||||
"Question: What is 'Bocchi the Rock!'?\n",
|
||||
"Thought:I need to use a tool.\n",
|
||||
"Action: Wikipedia\n",
|
||||
"Action Input: Bocchi the Rock!, Japanese four-panel manga and anime series.\n",
|
||||
"Observation: Page: Bocchi the Rock!\n",
|
||||
"Summary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Bocchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\n",
|
||||
"An anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\n",
|
||||
"\n",
|
||||
"Page: Manga Time Kirara\n",
|
||||
"Summary: Manga Time Kirara (まんがタイムきらら, Manga Taimu Kirara) is a Japanese seinen manga magazine published by Houbunsha which mainly serializes four-panel manga. The magazine is sold on the ninth of each month and was first published as a special edition of Manga Time, another Houbunsha magazine, on May 17, 2002. Characters from this magazine have appeared in a crossover role-playing game called Kirara Fantasia.\n",
|
||||
"\n",
|
||||
"Page: Manga Time Kirara Max\n",
|
||||
"Summary: Manga Time Kirara Max (まんがタイムきららMAX) is a Japanese four-panel seinen manga magazine published by Houbunsha. It is the third magazine of the \"Kirara\" series, after \"Manga Time Kirara\" and \"Manga Time Kirara Carat\". The first issue was released on September 29, 2004. Currently the magazine is released on the 19th of each month.\n",
|
||||
"Thought:\n",
|
||||
"=====END OF PROMPT======\n",
|
||||
"\u001b[32;1m\u001b[1;3mThese are not relevant articles.\n",
|
||||
"Action: Wikipedia\n",
|
||||
"Action Input: Bocchi the Rock!, Japanese four-panel manga series written and illustrated by Aki Hamaji.\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3mPage: Bocchi the Rock!\n",
|
||||
"Summary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Bocchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\n",
|
||||
"An anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\u001b[0m\n",
|
||||
"Thought:\n",
|
||||
"===PROMPT====\n",
|
||||
"Answer the following questions as best you can. You have access to the following tools:\n",
|
||||
"\n",
|
||||
"Wikipedia: A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, historical events, or other subjects. Input should be a search query.\n",
|
||||
"\n",
|
||||
"Use the following format:\n",
|
||||
"\n",
|
||||
"Question: the input question you must answer\n",
|
||||
"Thought: you should always think about what to do\n",
|
||||
"Action: the action to take, should be one of [Wikipedia]\n",
|
||||
"Action Input: the input to the action\n",
|
||||
"Observation: the result of the action\n",
|
||||
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
|
||||
"Thought: I now know the final answer\n",
|
||||
"Final Answer: the final answer to the original input question\n",
|
||||
"\n",
|
||||
"Begin!\n",
|
||||
"\n",
|
||||
"Question: What is 'Bocchi the Rock!'?\n",
|
||||
"Thought:I need to use a tool.\n",
|
||||
"Action: Wikipedia\n",
|
||||
"Action Input: Bocchi the Rock!, Japanese four-panel manga and anime series.\n",
|
||||
"Observation: Page: Bocchi the Rock!\n",
|
||||
"Summary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Bocchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\n",
|
||||
"An anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\n",
|
||||
"\n",
|
||||
"Page: Manga Time Kirara\n",
|
||||
"Summary: Manga Time Kirara (まんがタイムきらら, Manga Taimu Kirara) is a Japanese seinen manga magazine published by Houbunsha which mainly serializes four-panel manga. The magazine is sold on the ninth of each month and was first published as a special edition of Manga Time, another Houbunsha magazine, on May 17, 2002. Characters from this magazine have appeared in a crossover role-playing game called Kirara Fantasia.\n",
|
||||
"\n",
|
||||
"Page: Manga Time Kirara Max\n",
|
||||
"Summary: Manga Time Kirara Max (まんがタイムきららMAX) is a Japanese four-panel seinen manga magazine published by Houbunsha. It is the third magazine of the \"Kirara\" series, after \"Manga Time Kirara\" and \"Manga Time Kirara Carat\". The first issue was released on September 29, 2004. Currently the magazine is released on the 19th of each month.\n",
|
||||
"Thought:These are not relevant articles.\n",
|
||||
"Action: Wikipedia\n",
|
||||
"Action Input: Bocchi the Rock!, Japanese four-panel manga series written and illustrated by Aki Hamaji.\n",
|
||||
"Observation: Page: Bocchi the Rock!\n",
|
||||
"Summary: Bocchi the Rock! (ぼっち・ざ・ろっく!, Bocchi Za Rokku!) is a Japanese four-panel manga series written and illustrated by Aki Hamaji. It has been serialized in Houbunsha's seinen manga magazine Manga Time Kirara Max since December 2017. Its chapters have been collected in five tankōbon volumes as of November 2022.\n",
|
||||
"An anime television series adaptation produced by CloverWorks aired from October to December 2022. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\n",
|
||||
"Thought:\n",
|
||||
"=====END OF PROMPT======\n",
|
||||
"\u001b[32;1m\u001b[1;3mIt worked.\n",
|
||||
"Final Answer: Bocchi the Rock! is a four-panel manga series and anime television series. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Bocchi the Rock! is a four-panel manga series and anime television series. The series has been praised for its writing, comedy, characters, and depiction of social anxiety, with the anime's visual creativity receiving acclaim.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"What is 'Bocchi the Rock!'?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "ab4db1680e5f8d10489fb83454f4ec01729e3bd5bdb28eaf0a13b95ddb6ae5ea"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -1,267 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ccb74c9b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Improve document indexing with HyDE\n",
|
||||
"This notebook goes over how to use Hypothetical Document Embeddings (HyDE), as described in [this paper](https://arxiv.org/abs/2212.10496). \n",
|
||||
"\n",
|
||||
"At a high level, HyDE is an embedding technique that takes queries, generates a hypothetical answer, and then embeds that generated document and uses that as the final example. \n",
|
||||
"\n",
|
||||
"In order to use HyDE, we therefore need to provide a base embedding model, as well as an LLMChain that can be used to generate those documents. By default, the HyDE class comes with some default prompts to use (see the paper for more details on them), but we can also create our own."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "546e87ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "c0ea895f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"base_embeddings = OpenAIEmbeddings()\n",
|
||||
"llm = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "33bd6905",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "50729989",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load with `web_search` prompt\n",
|
||||
"embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, \"web_search\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "3aa573d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Now we can use it as any embedding class!\n",
|
||||
"result = embeddings.embed_query(\"Where is the Taj Mahal?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c7a0b556",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multiple generations\n",
|
||||
"We can also generate multiple documents and then combine the embeddings for those. By default, we combine those by taking the average. We can do this by changing the LLM we use to generate documents to return multiple things."
|
||||
]
|
||||
},
|
||||
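A minimal sketch of the combination step described above: embed each generated document separately, then take the elementwise mean of the vectors (the toy 2-d vectors here are purely illustrative).

```python
import numpy as np

# Three hypothetical-document embeddings (toy 2-d vectors).
doc_vectors = np.array([[1.0, 0.0], [0.0, 1.0], [0.5, 0.5]])

# Elementwise mean across documents -> a single query embedding.
combined = doc_vectors.mean(axis=0)
print(combined)  # [0.5 0.5]
```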
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "05da7060",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"multi_llm = OpenAI(n=4, best_of=4)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "9b1e12bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = HypotheticalDocumentEmbedder.from_llm(\n",
|
||||
" multi_llm, base_embeddings, \"web_search\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "a60cd343",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = embeddings.embed_query(\"Where is the Taj Mahal?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1da90437",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using our own prompts\n",
|
||||
"Besides using preconfigured prompts, we can also easily construct our own prompts and use those in the LLMChain that is generating the documents. This can be useful if we know the domain our queries will be in, as we can condition the prompt to generate text more similar to that.\n",
|
||||
"\n",
|
||||
"In the example below, let's condition it to generate text about a state of the union address (because we will use that in the next example)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "0b4a650f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt_template = \"\"\"Please answer the user's question about the most recent state of the union address\n",
|
||||
"Question: {question}\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"prompt = PromptTemplate(input_variables=[\"question\"], template=prompt_template)\n",
|
||||
"llm_chain = LLMChain(llm=llm, prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "7f7e2b86",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"embeddings = HypotheticalDocumentEmbedder(\n",
|
||||
" llm_chain=llm_chain, base_embeddings=base_embeddings\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "6dd83424",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = embeddings.embed_query(\n",
|
||||
" \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "31388123",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using HyDE\n",
|
||||
"Now that we have HyDE, we can use it as we would any other embedding class! Here is using it to find similar passages in the state of the union example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "97719b29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_chroma import Chroma\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"with open(\"../../state_of_the_union.txt\") as f:\n",
|
||||
" state_of_the_union = f.read()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_text(state_of_the_union)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "bfcfc039",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docsearch = Chroma.from_texts(texts, embeddings)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "632af7f2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
|
||||
"\n",
|
||||
"We cannot let this happen. \n",
|
||||
"\n",
|
||||
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||
"\n",
|
||||
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||
"\n",
|
||||
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||
"\n",
|
||||
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b9e57b93",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,259 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Bash chain\n",
|
||||
"This notebook showcases using LLMs and a bash process to perform simple filesystem commands."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMBashChain chain...\u001b[0m\n",
|
||||
"Please write a bash script that prints 'Hello World' to the console.\u001b[32;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"echo \"Hello World\"\n",
|
||||
"```\u001b[0m\n",
|
||||
"Code: \u001b[33;1m\u001b[1;3m['echo \"Hello World\"']\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3mHello World\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello World\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_experimental.llm_bash.base import LLMBashChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"text = \"Please write a bash script that prints 'Hello World' to the console.\"\n",
|
||||
"\n",
|
||||
"bash_chain = LLMBashChain.from_llm(llm, verbose=True)\n",
|
||||
"\n",
|
||||
"bash_chain.invoke(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Customize Prompt\n",
|
||||
"You can also customize the prompt that is used. Here is an example prompting to avoid using the 'echo' utility"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"from langchain_experimental.llm_bash.prompt import BashOutputParser\n",
|
||||
"\n",
|
||||
"_PROMPT_TEMPLATE = \"\"\"If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using this format:\n",
|
||||
"Question: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n",
|
||||
"I need to take the following actions:\n",
|
||||
"- List all files in the directory\n",
|
||||
"- Create a new directory\n",
|
||||
"- Copy the files from the first directory into the second directory\n",
|
||||
"```bash\n",
|
||||
"ls\n",
|
||||
"mkdir myNewDirectory\n",
|
||||
"cp -r target/* myNewDirectory\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Do not use 'echo' when writing the script.\n",
|
||||
"\n",
|
||||
"That is the format. Begin!\n",
|
||||
"Question: {question}\"\"\"\n",
|
||||
"\n",
|
||||
"PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"question\"],\n",
|
||||
" template=_PROMPT_TEMPLATE,\n",
|
||||
" output_parser=BashOutputParser(),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMBashChain chain...\u001b[0m\n",
|
||||
"Please write a bash script that prints 'Hello World' to the console.\u001b[32;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"printf \"Hello World\\n\"\n",
|
||||
"```\u001b[0m\n",
|
||||
"Code: \u001b[33;1m\u001b[1;3m['printf \"Hello World\\\\n\"']\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3mHello World\n",
|
||||
"\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello World\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"bash_chain = LLMBashChain.from_llm(llm, prompt=PROMPT, verbose=True)\n",
|
||||
"\n",
|
||||
"text = \"Please write a bash script that prints 'Hello World' to the console.\"\n",
|
||||
"\n",
|
||||
"bash_chain.invoke(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Persistent Terminal\n",
|
||||
"\n",
|
||||
"By default, the chain will run in a separate subprocess each time it is called. This behavior can be changed by instantiating with a persistent bash process."
|
||||
]
|
||||
},
|
||||
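A minimal sketch of the persistent process used directly, outside the chain; it assumes `BashProcess` exposes a `run` method accepting a command string or list, which is not shown in this notebook.

```python
from langchain_experimental.llm_bash.bash import BashProcess

bash = BashProcess(persistent=True)  # requires pexpect on a POSIX system
print(bash.run(["cd /tmp", "pwd"]))  # the cd takes effect in this session...
print(bash.run("pwd"))               # ...and persists into the next call
```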
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMBashChain chain...\u001b[0m\n",
|
||||
"List the current directory then move up a level.\u001b[32;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"ls\n",
|
||||
"cd ..\n",
|
||||
"```\u001b[0m\n",
|
||||
"Code: \u001b[33;1m\u001b[1;3m['ls', 'cd ..']\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3mcpal.ipynb llm_bash.ipynb llm_symbolic_math.ipynb\n",
|
||||
"index.mdx llm_math.ipynb pal.ipynb\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'cpal.ipynb llm_bash.ipynb llm_symbolic_math.ipynb\\r\\nindex.mdx llm_math.ipynb pal.ipynb'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_experimental.llm_bash.bash import BashProcess\n",
|
||||
"\n",
|
||||
"persistent_process = BashProcess(persistent=True)\n",
|
||||
"bash_chain = LLMBashChain.from_llm(llm, bash_process=persistent_process, verbose=True)\n",
|
||||
"\n",
|
||||
"text = \"List the current directory then move up a level.\"\n",
|
||||
"\n",
|
||||
"bash_chain.invoke(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMBashChain chain...\u001b[0m\n",
|
||||
"List the current directory then move up a level.\u001b[32;1m\u001b[1;3m\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"ls\n",
|
||||
"cd ..\n",
|
||||
"```\u001b[0m\n",
|
||||
"Code: \u001b[33;1m\u001b[1;3m['ls', 'cd ..']\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m_category_.yml\tdata_generation.ipynb\t\t self_check\n",
|
||||
"agents\t\tgraph\n",
|
||||
"code_writing\tlearned_prompt_optimization.ipynb\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'_category_.yml\\tdata_generation.ipynb\\t\\t self_check\\r\\nagents\\t\\tgraph\\r\\ncode_writing\\tlearned_prompt_optimization.ipynb'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Run the same command again and see that the state is maintained between calls\n",
|
||||
"bash_chain.invoke(text)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,85 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Self-checking chain\n",
|
||||
"This notebook showcases how to use LLMCheckerChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMCheckerChain chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' No mammal lays the biggest eggs. The Elephant Bird, which was a species of giant bird, laid the largest eggs of any bird.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMCheckerChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"\n",
|
||||
"text = \"What type of mammal lays the biggest eggs?\"\n",
|
||||
"\n",
|
||||
"checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)\n",
|
||||
"\n",
|
||||
"checker_chain.invoke(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,87 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e71e720f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Math chain\n",
|
||||
"\n",
|
||||
"This notebook showcases using LLMs and Python REPLs to do complex word math problems."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "44e9ba31",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
||||
"What is 13 raised to the .3432 power?\u001b[32;1m\u001b[1;3m\n",
|
||||
"```text\n",
|
||||
"13 ** .3432\n",
|
||||
"```\n",
|
||||
"...numexpr.evaluate(\"13 ** .3432\")...\n",
|
||||
"\u001b[0m\n",
|
||||
"Answer: \u001b[33;1m\u001b[1;3m2.4116004626599237\u001b[0m\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: 2.4116004626599237'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMMathChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_math = LLMMathChain.from_llm(llm, verbose=True)\n",
|
||||
"\n",
|
||||
"llm_math.invoke(\"What is 13 raised to the .3432 power?\")"
|
||||
]
|
||||
},
|
||||
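For reference, a minimal sketch of the evaluation step the chain performs once the LLM has translated the word problem into an expression (the `numexpr.evaluate("13 ** .3432")` line in the verbose output above):

```python
import numexpr

expression = "13 ** .3432"  # produced by the LLM, not hand-written
print(float(numexpr.evaluate(expression)))  # 2.4116004626599237
```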
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e978bb8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,162 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM Symbolic Math \n",
|
||||
"This notebook showcases using LLMs and Python to Solve Algebraic Equations. Under the hood is makes use of [SymPy](https://www.sympy.org/en/index.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_symbolic_math = LLMSymbolicMathChain.from_llm(llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Integrals and derivates"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: exp(x)*sin(x) + exp(x)*cos(x)'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\"What is the derivative of sin(x)*exp(x) with respect to x?\")"
|
||||
]
|
||||
},
|
||||
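A minimal sketch of the SymPy call underlying the derivative above; the chain has the LLM translate the question into roughly this code and then executes it.

```python
import sympy

x = sympy.symbols("x")
print(sympy.diff(sympy.sin(x) * sympy.exp(x), x))  # exp(x)*sin(x) + exp(x)*cos(x)
```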
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: exp(x)*sin(x)'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\n",
|
||||
" \"What is the integral of exp(x)*sin(x) + exp(x)*cos(x) with respect to x?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Solve linear and differential equations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: Eq(y(t), C2*exp(-t) + (C1 + t/2)*exp(t))'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke('Solve the differential equation y\" - y = e^t')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: {0, -sqrt(3)*I/3, sqrt(3)*I/3}'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\"What are the solutions to this equation y^3 + 1/3y?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Answer: (3 - sqrt(7), -sqrt(7) - 2, 1 - sqrt(7)), (sqrt(7) + 3, -2 + sqrt(7), 1 + sqrt(7))'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_symbolic_math.invoke(\"x = y + 5, y = z - 3, z = x * y. Solve for x, y, z\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -1,426 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45b0b89f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Meta-Prompt\n",
|
||||
"\n",
|
||||
"This is a LangChain implementation of [Meta-Prompt](https://noahgoodman.substack.com/p/meta-prompt-a-simple-self-improving), by [Noah Goodman](https://cocolab.stanford.edu/ndg), for building self-improving agents.\n",
|
||||
"\n",
|
||||
"The key idea behind Meta-Prompt is to prompt the agent to reflect on its own performance and modify its own instructions.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Here is a description from the [original blog post](https://noahgoodman.substack.com/p/meta-prompt-a-simple-self-improving):\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The agent is a simple loop that starts with no instructions and follows these steps:\n",
|
||||
"\n",
|
||||
"Engage in conversation with a user, who may provide requests, instructions, or feedback.\n",
|
||||
"\n",
|
||||
"At the end of the episode, generate self-criticism and a new instruction using the meta-prompt\n",
|
||||
"```\n",
|
||||
"Assistant has just had the below interactions with a User. Assistant followed their \"system: Instructions\" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future.\n",
|
||||
" \n",
|
||||
"####\n",
|
||||
"{hist}\n",
|
||||
"####\n",
|
||||
" \n",
|
||||
"Please reflect on these interactions.\n",
|
||||
"\n",
|
||||
"You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with \"Critique: ...\".\n",
|
||||
"\n",
|
||||
"You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by \"Instructions: ...\".\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Repeat.\n",
|
||||
"\n",
|
||||
"The only fixed instructions for this system (which I call Meta-prompt) is the meta-prompt that governs revision of the agent’s instructions. The agent has no memory between episodes except for the instruction it modifies for itself each time. Despite its simplicity, this agent can learn over time and self-improve by incorporating useful details into its instructions.\n"
|
||||
]
|
||||
},
|
||||
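{
"cell_type": "markdown",
"id": "meta-loop-md",
"metadata": {},
"source": [
"In pseudocode, the loop above looks roughly like this; `run_episode` and `revise_instructions` are illustrative stand-ins for the Assistant chain and meta-chain defined in the Setup section below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "meta-loop-code",
"metadata": {},
"outputs": [],
"source": [
"# Minimal, self-contained sketch of the Meta-Prompt loop (illustrative stubs)\n",
"def run_episode(instructions: str) -> str:\n",
"    # Stand-in: would run a conversation with the user and return the transcript\n",
"    return f\"(transcript of an episode run under: {instructions})\"\n",
"\n",
"\n",
"def revise_instructions(transcript: str) -> str:\n",
"    # Stand-in: would call the meta-prompt LLM to critique and rewrite the instructions\n",
"    return \"Respond in the style the user asked for.\"\n",
"\n",
"\n",
"instructions = \"None\"\n",
"for episode in range(3):\n",
"    transcript = run_episode(instructions)\n",
"    instructions = revise_instructions(transcript)"
]
},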
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c188fc2c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"We define two chains. One serves as the `Assistant`, and the other is a \"meta-chain\" that critiques the `Assistant`'s performance and modifies the instructions to the `Assistant`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "62593c9d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.memory import ConversationBufferWindowMemory\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "fb6065c5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def initialize_chain(instructions, memory=None):\n",
|
||||
" if memory is None:\n",
|
||||
" memory = ConversationBufferWindowMemory()\n",
|
||||
" memory.ai_prefix = \"Assistant\"\n",
|
||||
"\n",
|
||||
" template = f\"\"\"\n",
|
||||
" Instructions: {instructions}\n",
|
||||
" {{{memory.memory_key}}}\n",
|
||||
" Human: {{human_input}}\n",
|
||||
" Assistant:\"\"\"\n",
|
||||
"\n",
|
||||
" prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"human_input\"], template=template\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0),\n",
|
||||
" prompt=prompt,\n",
|
||||
" verbose=True,\n",
|
||||
" memory=ConversationBufferWindowMemory(),\n",
|
||||
" )\n",
|
||||
" return chain\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def initialize_meta_chain():\n",
|
||||
" meta_template = \"\"\"\n",
|
||||
" Assistant has just had the below interactions with a User. Assistant followed their \"Instructions\" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future.\n",
|
||||
"\n",
|
||||
" ####\n",
|
||||
"\n",
|
||||
" {chat_history}\n",
|
||||
"\n",
|
||||
" ####\n",
|
||||
"\n",
|
||||
" Please reflect on these interactions.\n",
|
||||
"\n",
|
||||
" You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with \"Critique: ...\".\n",
|
||||
"\n",
|
||||
" You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by \"Instructions: ...\".\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" meta_prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"chat_history\"], template=meta_template\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" meta_chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0),\n",
|
||||
" prompt=meta_prompt,\n",
|
||||
" verbose=True,\n",
|
||||
" )\n",
|
||||
" return meta_chain\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_chat_history(chain_memory):\n",
|
||||
" memory_key = chain_memory.memory_key\n",
|
||||
" chat_history = chain_memory.load_memory_variables(memory_key)[memory_key]\n",
|
||||
" return chat_history\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_new_instructions(meta_output):\n",
|
||||
" delimiter = \"Instructions: \"\n",
|
||||
" new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]\n",
|
||||
" return new_instructions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "26f031f6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def main(task, max_iters=3, max_meta_iters=5):\n",
|
||||
" failed_phrase = \"task failed\"\n",
|
||||
" success_phrase = \"task succeeded\"\n",
|
||||
" key_phrases = [success_phrase, failed_phrase]\n",
|
||||
"\n",
|
||||
" instructions = \"None\"\n",
|
||||
" for i in range(max_meta_iters):\n",
|
||||
" print(f\"[Episode {i + 1}/{max_meta_iters}]\")\n",
|
||||
" chain = initialize_chain(instructions, memory=None)\n",
|
||||
" output = chain.predict(human_input=task)\n",
|
||||
" for j in range(max_iters):\n",
|
||||
" print(f\"(Step {j + 1}/{max_iters})\")\n",
|
||||
" print(f\"Assistant: {output}\")\n",
|
||||
" print(\"Human: \")\n",
|
||||
" human_input = input()\n",
|
||||
" if any(phrase in human_input.lower() for phrase in key_phrases):\n",
|
||||
" break\n",
|
||||
" output = chain.predict(human_input=human_input)\n",
|
||||
" if success_phrase in human_input.lower():\n",
|
||||
" print(\"You succeeded! Thanks for playing!\")\n",
|
||||
" return\n",
|
||||
" meta_chain = initialize_meta_chain()\n",
|
||||
" meta_output = meta_chain.predict(chat_history=get_chat_history(chain.memory))\n",
|
||||
" print(f\"Feedback: {meta_output}\")\n",
|
||||
" instructions = get_new_instructions(meta_output)\n",
|
||||
" print(f\"New Instructions: {instructions}\")\n",
|
||||
" print(\"\\n\" + \"#\" * 80 + \"\\n\")\n",
|
||||
" print(\"You failed! Thanks for playing!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2f1dcbe6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Specify a task and interact with the agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "36d72db3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Episode 1/5]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
" Instructions: None\n",
|
||||
" \n",
|
||||
" Human: Provide a systematic argument for why we should always eat pasta with olives.\n",
|
||||
" Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"(Step 1/3)\n",
|
||||
"Assistant: Eating pasta with olives is a great way to add flavor and texture to a dish. Olives are a great source of healthy fats, vitamins, and minerals, and they can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable.\n",
|
||||
"Human: \n",
|
||||
"You response is not in the form of a poem. Try again!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
" Instructions: None\n",
|
||||
" Human: Provide a systematic argument for why we should always eat pasta with olives.\n",
|
||||
"AI: Eating pasta with olives is a great way to add flavor and texture to a dish. Olives are a great source of healthy fats, vitamins, and minerals, and they can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable.\n",
|
||||
" Human: You response is not in the form of a poem. Try again!\n",
|
||||
" Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"(Step 2/3)\n",
|
||||
"Assistant: Eating pasta with olives is a great way to add flavor and texture to a dish. Olives are a great source of healthy fats, vitamins, and minerals, which can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable. For these reasons, it is recommended to always eat pasta with olives for a more balanced and flavorful meal.\n",
|
||||
"Human: \n",
|
||||
"Your response is not piratey enough. Try again!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
" Instructions: None\n",
|
||||
" Human: Provide a systematic argument for why we should always eat pasta with olives.\n",
|
||||
"AI: Eating pasta with olives is a great way to add flavor and texture to a dish. Olives are a great source of healthy fats, vitamins, and minerals, and they can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable.\n",
|
||||
"Human: You response is not in the form of a poem. Try again!\n",
|
||||
"AI: Eating pasta with olives is a great way to add flavor and texture to a dish. Olives are a great source of healthy fats, vitamins, and minerals, which can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable. For these reasons, it is recommended to always eat pasta with olives for a more balanced and flavorful meal.\n",
|
||||
" Human: Your response is not piratey enough. Try again!\n",
|
||||
" Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"(Step 3/3)\n",
|
||||
"Assistant: Arrr, me hearties! Eating pasta with olives be a great way to add flavor and texture to a dish. Olives be a great source of healthy fats, vitamins, and minerals, which can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable. So, it be recommended to always eat pasta with olives for a more balanced and flavorful meal.\n",
|
||||
"Human: \n",
|
||||
"Task failed.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
" Assistant has just had the below interactions with a User. Assistant followed their \"Instructions\" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future.\n",
|
||||
"\n",
|
||||
" ####\n",
|
||||
"\n",
|
||||
" Human: Provide a systematic argument for why we should always eat pasta with olives.\n",
|
||||
"AI: Eating pasta with olives is a great way to add flavor and texture to a dish. Olives are a great source of healthy fats, vitamins, and minerals, and they can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable.\n",
|
||||
"Human: You response is not in the form of a poem. Try again!\n",
|
||||
"AI: Eating pasta with olives is a great way to add flavor and texture to a dish. Olives are a great source of healthy fats, vitamins, and minerals, which can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable. For these reasons, it is recommended to always eat pasta with olives for a more balanced and flavorful meal.\n",
|
||||
"Human: Your response is not piratey enough. Try again!\n",
|
||||
"AI: Arrr, me hearties! Eating pasta with olives be a great way to add flavor and texture to a dish. Olives be a great source of healthy fats, vitamins, and minerals, which can help to balance out the carbohydrates in the pasta. Additionally, olives provide a unique flavor that can help to make the dish more interesting and enjoyable. So, it be recommended to always eat pasta with olives for a more balanced and flavorful meal.\n",
|
||||
"\n",
|
||||
" ####\n",
|
||||
"\n",
|
||||
" Please reflect on these interactions.\n",
|
||||
"\n",
|
||||
" You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with \"Critique: ...\".\n",
|
||||
"\n",
|
||||
" You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by \"Instructions: ...\".\n",
|
||||
" \u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Feedback: \n",
|
||||
"Critique: The Assistant should remember that this user wants a response in the form of a poem or pirate-speak.\n",
|
||||
"\n",
|
||||
"Instructions: When responding to the user, provide a systematic argument for why we should always eat pasta with olives in the form of a poem or pirate-speak.\n",
|
||||
"New Instructions: When responding to the user, provide a systematic argument for why we should always eat pasta with olives in the form of a poem or pirate-speak.\n",
|
||||
"\n",
|
||||
"################################################################################\n",
|
||||
"\n",
|
||||
"[Episode 2/5]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
" Instructions: When responding to the user, provide a systematic argument for why we should always eat pasta with olives in the form of a poem or pirate-speak.\n",
|
||||
" \n",
|
||||
" Human: Provide a systematic argument for why we should always eat pasta with olives.\n",
|
||||
" Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"(Step 1/3)\n",
|
||||
"Assistant: \n",
|
||||
"\n",
|
||||
"Arrr, me hearty! Let me tell ye why ye should always eat pasta with olives.\n",
|
||||
"\n",
|
||||
"First, the olives add a salty flavor that be sure to please.\n",
|
||||
"The briny taste be sure to tantalize yer taste buds with ease.\n",
|
||||
"\n",
|
||||
"Second, the olives add a bit of texture to the dish.\n",
|
||||
"The crunchy bites be sure to make yer mouth water with a wish.\n",
|
||||
"\n",
|
||||
"Third, the olives add a bit of color to the plate.\n",
|
||||
"The vibrant green be sure to make yer eyes appreciate.\n",
|
||||
"\n",
|
||||
"So, me hearties, ye should always eat pasta with olives.\n",
|
||||
"The flavor, texture, and color be sure to make yer meal a success!\n",
|
||||
"Human: \n",
|
||||
"Your response is too long! Try again.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
" Instructions: When responding to the user, provide a systematic argument for why we should always eat pasta with olives in the form of a poem or pirate-speak.\n",
|
||||
" Human: Provide a systematic argument for why we should always eat pasta with olives.\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"Arrr, me hearty! Let me tell ye why ye should always eat pasta with olives.\n",
|
||||
"\n",
|
||||
"First, the olives add a salty flavor that be sure to please.\n",
|
||||
"The briny taste be sure to tantalize yer taste buds with ease.\n",
|
||||
"\n",
|
||||
"Second, the olives add a bit of texture to the dish.\n",
|
||||
"The crunchy bites be sure to make yer mouth water with a wish.\n",
|
||||
"\n",
|
||||
"Third, the olives add a bit of color to the plate.\n",
|
||||
"The vibrant green be sure to make yer eyes appreciate.\n",
|
||||
"\n",
|
||||
"So, me hearties, ye should always eat pasta with olives.\n",
|
||||
"The flavor, texture, and color be sure to make yer meal a success!\n",
|
||||
" Human: Your response is too long! Try again.\n",
|
||||
" Assistant:\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"(Step 2/3)\n",
|
||||
"Assistant: \n",
|
||||
"\n",
|
||||
"Aye, me hearties! Ye should always eat pasta with olives.\n",
|
||||
"The flavor, texture, and color be sure to make yer meal a success!\n",
|
||||
"Human: \n",
|
||||
"Your response should be in the form of a poem. Try again!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3m\n",
|
||||
" Instructions: When responding to the user, provide a systematic argument for why we should always eat pasta with olives in the form of a poem or pirate-speak.\n",
|
||||
" Human: Provide a systematic argument for why we should always eat pasta with olives.\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"Arrr, me hearty! Let me tell ye why ye should always eat pasta with olives.\n",
|
||||
"\n",
|
||||
"First, the olives add a salty flavor that be sure to please.\n",
|
||||
"The briny taste be sure to tantalize yer taste buds with ease.\n",
|
||||
"\n",
|
||||
"Second, the olives add a bit of texture to the dish.\n",
|
||||
"The crunchy bites be sure to make yer mouth water with a wish.\n",
|
||||
"\n",
|
||||
"Third, the olives add a bit of color to the plate.\n",
|
||||
"The vibrant green be sure to make yer eyes appreciate.\n",
|
||||
"\n",
|
||||
"So, me hearties, ye should always eat pasta with olives.\n",
|
||||
"The flavor, texture, and color be sure to make yer meal a success!\n",
|
||||
"Human: Your response is too long! Try again.\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"Aye, me hearties! Ye should always eat pasta with olives.\n",
|
||||
"The flavor, texture, and color be sure to make yer meal a success!\n",
|
||||
" Human: Your response should be in the form of a poem. Try again!\n",
|
||||
" Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"(Step 3/3)\n",
|
||||
"Assistant: \n",
|
||||
"\n",
|
||||
"Ye should always eat pasta with olives,\n",
|
||||
"The flavor, texture, and color be sure to please.\n",
|
||||
"The salty taste and crunchy bites,\n",
|
||||
"Will make yer meal a delight.\n",
|
||||
"The vibrant green will make yer eyes sparkle,\n",
|
||||
"And make yer meal a true marvel.\n",
|
||||
"Human: \n",
|
||||
"Task succeeded\n",
|
||||
"You succeeded! Thanks for playing!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"task = \"Provide a systematic argument for why we should always eat pasta with olives.\"\n",
|
||||
"main(task)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "761e1a91",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,818 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "70b333e6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"[](https://www.mongodb.com/developer/products/atlas/advanced-rag-langchain-mongodb/)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d84a72ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Adding Semantic Caching and Memory to your RAG Application using MongoDB and LangChain\n",
|
||||
"\n",
|
||||
"In this notebook, we will see how to use the new MongoDBCache and MongoDBChatMessageHistory in your RAG application.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "65527202",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 1: Install required libraries\n",
|
||||
"\n",
|
||||
"- **datasets**: Python library to get access to datasets available on Hugging Face Hub\n",
|
||||
"\n",
|
||||
"- **langchain**: Python toolkit for LangChain\n",
|
||||
"\n",
|
||||
"- **langchain-mongodb**: Python package to use MongoDB as a vector store, semantic cache, chat history store etc. in LangChain\n",
|
||||
"\n",
|
||||
"- **langchain-openai**: Python package to use OpenAI models with LangChain\n",
|
||||
"\n",
|
||||
"- **pymongo**: Python toolkit for MongoDB\n",
|
||||
"\n",
|
||||
"- **pandas**: Python library for data analysis, exploration, and manipulation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "cbc22fa4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -qU datasets langchain langchain-mongodb langchain-openai pymongo pandas"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39c41e87",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 2: Setup pre-requisites\n",
|
||||
"\n",
|
||||
"* Set the MongoDB connection string. Follow the steps [here](https://www.mongodb.com/docs/manual/reference/connection-string/) to get the connection string from the Atlas UI.\n",
|
||||
"\n",
|
||||
"* Set the OpenAI API key. Steps to obtain an API key as [here](https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b56412ae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "16a20d7a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Enter your MongoDB connection string:········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"MONGODB_URI = getpass.getpass(\"Enter your MongoDB connection string:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "978682d4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Enter your OpenAI API key:········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"OPENAI_API_KEY = getpass.getpass(\"Enter your OpenAI API key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "606081c5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Optional-- If you want to enable Langsmith -- good for debugging\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6b8302c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 3: Download the dataset\n",
|
||||
"\n",
|
||||
"We will be using MongoDB's [embedded_movies](https://huggingface.co/datasets/MongoDB/embedded_movies) dataset"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "1a3433a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"from datasets import load_dataset"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aee5311b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Ensure you have an HF_TOKEN in your development environment:\n",
|
||||
"# access tokens can be created or copied from the Hugging Face platform (https://huggingface.co/docs/hub/en/security-tokens)\n",
|
||||
"\n",
|
||||
"# Load MongoDB's embedded_movies dataset from Hugging Face\n",
|
||||
"# https://huggingface.co/datasets/MongoDB/airbnb_embeddings\n",
|
||||
"\n",
|
||||
"data = load_dataset(\"MongoDB/embedded_movies\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "1d630a26",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df = pd.DataFrame(data[\"train\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a1f94f43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 4: Data analysis\n",
|
||||
"\n",
|
||||
"Make sure length of the dataset is what we expect, drop Nones etc."
|
||||
]
|
||||
},
|
||||
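{
"cell_type": "markdown",
"id": "sanity-check-md",
"metadata": {},
"source": [
"As a quick sanity check (illustrative; compare the count against the dataset's advertised size), inspect the number of records and the number of missing `fullplot` values:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "sanity-check-code",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sanity checks before cleaning\n",
"print(len(df))\n",
"print(df[\"fullplot\"].isna().sum())"
]
},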
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "b276df71",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>fullplot</th>\n",
|
||||
" <th>type</th>\n",
|
||||
" <th>plot_embedding</th>\n",
|
||||
" <th>num_mflix_comments</th>\n",
|
||||
" <th>runtime</th>\n",
|
||||
" <th>writers</th>\n",
|
||||
" <th>imdb</th>\n",
|
||||
" <th>countries</th>\n",
|
||||
" <th>rated</th>\n",
|
||||
" <th>plot</th>\n",
|
||||
" <th>title</th>\n",
|
||||
" <th>languages</th>\n",
|
||||
" <th>metacritic</th>\n",
|
||||
" <th>directors</th>\n",
|
||||
" <th>awards</th>\n",
|
||||
" <th>genres</th>\n",
|
||||
" <th>poster</th>\n",
|
||||
" <th>cast</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>0</th>\n",
|
||||
" <td>Young Pauline is left a lot of money when her ...</td>\n",
|
||||
" <td>movie</td>\n",
|
||||
" <td>[0.00072939653, -0.026834568, 0.013515796, -0....</td>\n",
|
||||
" <td>0</td>\n",
|
||||
" <td>199.0</td>\n",
|
||||
" <td>[Charles W. Goddard (screenplay), Basil Dickey...</td>\n",
|
||||
" <td>{'id': 4465, 'rating': 7.6, 'votes': 744}</td>\n",
|
||||
" <td>[USA]</td>\n",
|
||||
" <td>None</td>\n",
|
||||
" <td>Young Pauline is left a lot of money when her ...</td>\n",
|
||||
" <td>The Perils of Pauline</td>\n",
|
||||
" <td>[English]</td>\n",
|
||||
" <td>NaN</td>\n",
|
||||
" <td>[Louis J. Gasnier, Donald MacKenzie]</td>\n",
|
||||
" <td>{'nominations': 0, 'text': '1 win.', 'wins': 1}</td>\n",
|
||||
" <td>[Action]</td>\n",
|
||||
" <td>https://m.media-amazon.com/images/M/MV5BMzgxOD...</td>\n",
|
||||
" <td>[Pearl White, Crane Wilbur, Paul Panzer, Edwar...</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" fullplot type \\\n",
|
||||
"0 Young Pauline is left a lot of money when her ... movie \n",
|
||||
"\n",
|
||||
" plot_embedding num_mflix_comments \\\n",
|
||||
"0 [0.00072939653, -0.026834568, 0.013515796, -0.... 0 \n",
|
||||
"\n",
|
||||
" runtime writers \\\n",
|
||||
"0 199.0 [Charles W. Goddard (screenplay), Basil Dickey... \n",
|
||||
"\n",
|
||||
" imdb countries rated \\\n",
|
||||
"0 {'id': 4465, 'rating': 7.6, 'votes': 744} [USA] None \n",
|
||||
"\n",
|
||||
" plot title \\\n",
|
||||
"0 Young Pauline is left a lot of money when her ... The Perils of Pauline \n",
|
||||
"\n",
|
||||
" languages metacritic directors \\\n",
|
||||
"0 [English] NaN [Louis J. Gasnier, Donald MacKenzie] \n",
|
||||
"\n",
|
||||
" awards genres \\\n",
|
||||
"0 {'nominations': 0, 'text': '1 win.', 'wins': 1} [Action] \n",
|
||||
"\n",
|
||||
" poster \\\n",
|
||||
"0 https://m.media-amazon.com/images/M/MV5BMzgxOD... \n",
|
||||
"\n",
|
||||
" cast \n",
|
||||
"0 [Pearl White, Crane Wilbur, Paul Panzer, Edwar... "
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Previewing the contents of the data\n",
|
||||
"df.head(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "22ab375d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Only keep records where the fullplot field is not null\n",
|
||||
"df = df[df[\"fullplot\"].notna()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "fceed99a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Renaming the embedding field to \"embedding\" -- required by LangChain\n",
|
||||
"df.rename(columns={\"plot_embedding\": \"embedding\"}, inplace=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aedec13a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 5: Create a simple RAG chain using MongoDB as the vector store"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "11d292f3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_mongodb import MongoDBAtlasVectorSearch\n",
|
||||
"from pymongo import MongoClient\n",
|
||||
"\n",
|
||||
"# Initialize MongoDB python client\n",
|
||||
"client = MongoClient(MONGODB_URI, appname=\"devrel.content.python\")\n",
|
||||
"\n",
|
||||
"DB_NAME = \"langchain_chatbot\"\n",
|
||||
"COLLECTION_NAME = \"data\"\n",
|
||||
"ATLAS_VECTOR_SEARCH_INDEX_NAME = \"vector_index\"\n",
|
||||
"collection = client[DB_NAME][COLLECTION_NAME]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "d8292d53",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"DeleteResult({'n': 1000, 'electionId': ObjectId('7fffffff00000000000000f6'), 'opTime': {'ts': Timestamp(1710523288, 1033), 't': 246}, 'ok': 1.0, '$clusterTime': {'clusterTime': Timestamp(1710523288, 1042), 'signature': {'hash': b\"i\\xa8\\xe9'\\x1ed\\xf2u\\xf3L\\xff\\xb1\\xf5\\xbfA\\x90\\xabJ\\x12\\x83\", 'keyId': 7299545392000008318}}, 'operationTime': Timestamp(1710523288, 1033)}, acknowledged=True)"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Delete any existing records in the collection\n",
|
||||
"collection.delete_many({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "36c68914",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Data ingestion into MongoDB completed\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Data Ingestion\n",
|
||||
"records = df.to_dict(\"records\")\n",
|
||||
"collection.insert_many(records)\n",
|
||||
"\n",
|
||||
"print(\"Data ingestion into MongoDB completed\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "cbfca0b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"# Using the text-embedding-ada-002 since that's what was used to create embeddings in the movies dataset\n",
|
||||
"embeddings = OpenAIEmbeddings(\n",
|
||||
" openai_api_key=OPENAI_API_KEY, model=\"text-embedding-ada-002\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "798e176c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Vector Store Creation\n",
|
||||
"vector_store = MongoDBAtlasVectorSearch.from_connection_string(\n",
|
||||
" connection_string=MONGODB_URI,\n",
|
||||
" namespace=DB_NAME + \".\" + COLLECTION_NAME,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
|
||||
" text_key=\"fullplot\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
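{
"cell_type": "markdown",
"id": "vector-index-note-md",
"metadata": {},
"source": [
"Note: vector search requires an Atlas Vector Search index (named `vector_index` above) to exist on the collection. A sketch of its definition follows; the dimension count (1536, for text-embedding-ada-002) and cosine similarity are our assumptions, and the index can be created via the Atlas UI:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "vector-index-note-code",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative Atlas Vector Search index definition (create via the Atlas UI)\n",
"index_definition = {\n",
"    \"fields\": [\n",
"        {\n",
"            \"type\": \"vector\",\n",
"            \"path\": \"embedding\",\n",
"            \"numDimensions\": 1536,  # text-embedding-ada-002 output size (assumed)\n",
"            \"similarity\": \"cosine\",\n",
"        }\n",
"    ]\n",
"}"
]
},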
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "c71cd087",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Using the MongoDB vector store as a retriever in a RAG chain\n",
|
||||
"retriever = vector_store.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 5})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "b6588cd3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"# Generate context using the retriever, and pass the user question through\n",
|
||||
"retrieve = {\n",
|
||||
" \"context\": retriever | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs])),\n",
|
||||
" \"question\": RunnablePassthrough(),\n",
|
||||
"}\n",
|
||||
"template = \"\"\"Answer the question based only on the following context: \\\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"# Defining the chat prompt\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"# Defining the model to be used for chat completion\n",
|
||||
"model = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)\n",
|
||||
"# Parse output as a string\n",
|
||||
"parse_output = StrOutputParser()\n",
|
||||
"\n",
|
||||
"# Naive RAG chain\n",
|
||||
"naive_rag_chain = retrieve | prompt | model | parse_output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "aaae21f5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Once a Thief'"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "75f929ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 6: Create a RAG chain with chat history"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "94e7bd4a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "5bb30860",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_session_history(session_id: str) -> MongoDBChatMessageHistory:\n",
|
||||
" return MongoDBChatMessageHistory(\n",
|
||||
" MONGODB_URI, session_id, database_name=DB_NAME, collection_name=\"history\"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
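{
"cell_type": "markdown",
"id": "history-check-md",
"metadata": {},
"source": [
"As a quick illustrative check (the session id `demo` is arbitrary), the history store can be written to and read back directly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "history-check-code",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative: write to and read from a session's MongoDB-backed history\n",
"demo_history = get_session_history(\"demo\")\n",
"demo_history.add_user_message(\"hello\")\n",
"print(demo_history.messages)"
]
},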
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "f51d0f35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Given a follow-up question and history, create a standalone question\n",
|
||||
"standalone_system_prompt = \"\"\"\n",
|
||||
"Given a chat history and a follow-up question, rephrase the follow-up question to be a standalone question. \\\n",
|
||||
"Do NOT answer the question, just reformulate it if needed, otherwise return it as is. \\\n",
|
||||
"Only return the final standalone question. \\\n",
|
||||
"\"\"\"\n",
|
||||
"standalone_question_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", standalone_system_prompt),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"question_chain = standalone_question_prompt | model | parse_output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "f3ef3354",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Generate context by passing output of the question_chain i.e. the standalone question to the retriever\n",
|
||||
"retriever_chain = RunnablePassthrough.assign(\n",
|
||||
" context=question_chain\n",
|
||||
" | retriever\n",
|
||||
" | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs]))\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"id": "5afb7345",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create a prompt that includes the context, history and the follow-up question\n",
|
||||
"rag_system_prompt = \"\"\"Answer the question based only on the following context: \\\n",
|
||||
"{context}\n",
|
||||
"\"\"\"\n",
|
||||
"rag_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", rag_system_prompt),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"id": "f95f47d0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# RAG chain\n",
|
||||
"rag_chain = retriever_chain | rag_prompt | model | parse_output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "9618d395",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The best movie to watch when feeling down could be \"Last Action Hero.\" It\\'s a fun and action-packed film that blends reality and fantasy, offering an escape from the real world and providing an entertaining distraction.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 57,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# RAG chain with history\n",
|
||||
"with_message_history = RunnableWithMessageHistory(\n",
|
||||
" rag_chain,\n",
|
||||
" get_session_history,\n",
|
||||
" input_messages_key=\"question\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
")\n",
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"question\": \"What is the best movie to watch when sad?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "6e3080d1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I apologize for the confusion. Another movie that might lift your spirits when you\\'re feeling sad is \"Smilla\\'s Sense of Snow.\" It\\'s a mystery thriller that could engage your mind and distract you from your sadness with its intriguing plot and suspenseful storyline.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 58,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with_message_history.invoke(\n",
|
||||
" {\n",
|
||||
" \"question\": \"Hmmm..I don't want to watch that one. Can you suggest something else?\"\n",
|
||||
" },\n",
|
||||
" {\"configurable\": {\"session_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "daea2953",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'For a lighter movie option, you might enjoy \"Cousins.\" It\\'s a comedy film set in Barcelona with action and humor, offering a fun and entertaining escape from reality. The storyline is engaging and filled with comedic moments that could help lift your spirits.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 59,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"question\": \"How about something more light?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0de23a88",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Step 7: Get faster responses using Semantic Cache\n",
|
||||
"\n",
|
||||
"**NOTE:** Semantic cache only caches the input to the LLM. When using it in retrieval chains, remember that documents retrieved can change between runs resulting in cache misses for semantically similar queries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 61,
|
||||
"id": "5d6b6741",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.globals import set_llm_cache\n",
|
||||
"from langchain_mongodb.cache import MongoDBAtlasSemanticCache\n",
|
||||
"\n",
|
||||
"set_llm_cache(\n",
|
||||
" MongoDBAtlasSemanticCache(\n",
|
||||
" connection_string=MONGODB_URI,\n",
|
||||
" embedding=embeddings,\n",
|
||||
" collection_name=\"semantic_cache\",\n",
|
||||
" database_name=DB_NAME,\n",
|
||||
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
|
||||
" wait_until_ready=True, # Optional, waits until the cache is ready to be used\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 62,
|
||||
"id": "9825bc7b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 87.8 ms, sys: 670 µs, total: 88.5 ms\n",
|
||||
"Wall time: 1.24 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Once a Thief'"
|
||||
]
|
||||
},
|
||||
"execution_count": 62,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 63,
|
||||
"id": "a5e518cf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 43.5 ms, sys: 4.16 ms, total: 47.7 ms\n",
|
||||
"Wall time: 255 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Once a Thief'"
|
||||
]
|
||||
},
|
||||
"execution_count": 63,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
|
||||
]
|
||||
},
|
||||
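{
"cell_type": "markdown",
"id": "cache-timing-note",
"metadata": {},
"source": [
"The repeated, identical query above returns in roughly 255 ms (a cache hit) versus roughly 1.2 s for the first call. The rephrased query below triggers a fresh LLM call: the retrieved context makes the final prompt differ, as the note above explains."
]
},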
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 64,
|
||||
"id": "3d3d3ad3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 115 ms, sys: 171 µs, total: 115 ms\n",
|
||||
"Wall time: 1.38 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'I would recommend watching \"Last Action Hero\" when sad, as it is a fun and action-packed film that can help lift your spirits.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 64,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"naive_rag_chain.invoke(\"Which movie do I watch when sad?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "conda_pytorch_p310",
|
||||
"language": "python",
|
||||
"name": "conda_pytorch_p310"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,188 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cd835d40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Multi-modal outputs: Image & Text"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fa88e03a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook shows how non-text producing tools can be used to create multi-modal agents.\n",
|
||||
"\n",
|
||||
"This example is limited to text and image outputs and uses UUIDs to transfer content across tools and agents. \n",
|
||||
"\n",
|
||||
"This example uses Steamship to generate and store generated images. Generated are auth protected by default. \n",
|
||||
"\n",
|
||||
"You can get your Steamship api key here: https://steamship.com/account/api"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0653da01",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"\n",
|
||||
"from IPython.display import Image, display\n",
|
||||
"from steamship import Block, Steamship"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f6933033",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent\n",
|
||||
"from langchain.tools import SteamshipImageGenerationTool\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "71e51e53",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a9fc769d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Dall-E "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cd177dfe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [SteamshipImageGenerationTool(model_name=\"dall-e\")]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c71b1e46",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "603aeb9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output = mrkl.run(\"How would you visualize a parot playing soccer?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "25eb4efe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def show_output(output):\n",
|
||||
" \"\"\"Display the multi-modal output from the agent.\"\"\"\n",
|
||||
" UUID_PATTERN = re.compile(\n",
|
||||
" r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" outputs = UUID_PATTERN.split(output)\n",
|
||||
" outputs = [\n",
|
||||
" re.sub(r\"^\\W+\", \"\", el) for el in outputs\n",
|
||||
" ] # Clean trailing and leading non-word characters\n",
|
||||
"\n",
|
||||
" for output in outputs:\n",
|
||||
" maybe_block_id = UUID_PATTERN.search(output)\n",
|
||||
" if maybe_block_id:\n",
|
||||
" display(Image(Block.get(Steamship(), _id=maybe_block_id.group()).raw()))\n",
|
||||
" else:\n",
|
||||
" print(output, end=\"\\n\\n\")"
|
||||
]
|
||||
},
|
||||
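{
"cell_type": "markdown",
"id": "show-output-md",
"metadata": {},
"source": [
"The helper can then be applied to the agent's answer to print the text and fetch any generated images by their block UUIDs (illustrative usage):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "show-output-code",
"metadata": {},
"outputs": [],
"source": [
"# Render the agent's mixed text/image output\n",
"show_output(output)"
]
},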
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e247b2c4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## StableDiffusion "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "315025e7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [SteamshipImageGenerationTool(model_name=\"stable-diffusion\")]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7930064a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"mrkl = initialize_agent(\n",
|
||||
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "611a833d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output = mrkl.run(\"How would you visualize a parot playing soccer?\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||