Mirror of https://github.com/hwchase17/langchain.git
Synced 2026-02-05 08:40:36 +00:00

Compare commits: bagatur/rf ... eugene/sta (524 commits)
41 .github/CONTRIBUTING.md (vendored)
@@ -3,43 +3,4 @@
Hi there! Thank you for even being interested in contributing to LangChain.
As an open-source project in a rapidly developing field, we are extremely open to contributions, whether they involve new features, improved infrastructure, better documentation, or bug fixes.

To learn about how to contribute, please follow the [guides here](https://python.langchain.com/docs/contributing/)

## 🗺️ Guidelines

### 👩💻 Ways to contribute

There are many ways to contribute to LangChain. Here are some common ways people contribute:

- [**Documentation**](https://python.langchain.com/docs/contributing/documentation): Help improve our docs, including this one!
- [**Code**](https://python.langchain.com/docs/contributing/code): Help us write code, fix bugs, or improve our infrastructure.
- [**Integrations**](https://python.langchain.com/docs/contributing/integrations): Help us integrate with your favorite vendors and tools.

### 🚩GitHub Issues

Our [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests.

There is a taxonomy of labels to help with sorting and discovery of issues of interest. Please use these to help organize issues.

If you start working on an issue, please assign it to yourself.

If you are adding an issue, please try to keep it focused on a single, modular bug/improvement/feature.
If two issues are related, or blocking, please link them rather than combining them.

We will try to keep these issues as up-to-date as possible, though
with the rapid rate of development in this field some may get out of date.
If you notice this happening, please let us know.

### 🙋Getting Help

Our goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please
contact a maintainer! Not only do we want to help get you unblocked, but we also want to make sure that the process is
smooth for future contributors.

In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase.
If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help -
we do not want these to get in the way of getting good code into the codebase.

### Contributor Documentation

To learn about how to contribute, please follow the [guides here](https://python.langchain.com/docs/contributing/)
To learn how to contribute to LangChain, please follow the [contribution guide here](https://python.langchain.com/docs/contributing/).
10 .github/DISCUSSION_TEMPLATE/q-a.yml (vendored)
@@ -3,18 +3,18 @@ body:
- type: markdown
attributes:
value: |
Thanks for your interest in 🦜️🔗 LangChain!
Thanks for your interest in LangChain 🦜️🔗!

Please follow these instructions, fill every question, and do every step. 🙏

We're asking for this because answering questions and solving problems in GitHub takes a lot of time --
this is time that we cannot spend on adding new features, fixing bugs, write documentation or reviewing pull requests.
this is time that we cannot spend on adding new features, fixing bugs, writing documentation or reviewing pull requests.

By asking questions in a structured way (following this) it will be much easier to help you.
By asking questions in a structured way (following this) it will be much easier for us to help you.

And there's a high chance that you will find the solution along the way and you won't even have to submit it and wait for an answer. 😎
There's a high chance that by following this process, you'll find the solution on your own, eliminating the need to submit a question and wait for an answer. 😎

As there are too many questions, we will **DISCARD** and close the incomplete ones.
As there are many questions submitted every day, we will **DISCARD** and close the incomplete ones.

That will allow us (and others) to focus on helping people like you that follow the whole process. 🤓
2 .github/ISSUE_TEMPLATE/bug-report.yml (vendored)
@@ -35,6 +35,8 @@ body:
required: true
- label: I am sure that this is a bug in LangChain rather than my code.
required: true
- label: The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).
required: true
- type: textarea
id: reproduction
validations:
2 .github/ISSUE_TEMPLATE/privileged.yml (vendored)
@@ -9,7 +9,7 @@ body:
If you are not a LangChain maintainer or were not asked directly by a maintainer to create an issue, then please start the conversation in a [Question in GitHub Discussions](https://github.com/langchain-ai/langchain/discussions/categories/q-a) instead.

You are a LangChain maintainer if you maintain any of the packages inside of the LangChain repository
or are a regular contributor to LangChain with previous merged merged pull requests.
or are a regular contributor to LangChain with previous merged pull requests.
- type: checkboxes
id: privileged
attributes:
33 .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -1,20 +1,29 @@
<!-- Thank you for contributing to LangChain!
Thank you for contributing to LangChain!

Please title your PR "<package>: <description>", where <package> is whichever of langchain, community, core, experimental, etc. is being modified.
- [ ] **PR title**: "package: description"
- Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes.
- Example: "community: add foobar LLM"

Replace this entire comment with:
- **Description:** a description of the change,
- **Issue:** the issue # it fixes if applicable,
- **Dependencies:** any dependencies required for this change,
- **Twitter handle:** we announce bigger features on Twitter. If your PR gets announced, and you'd like a mention, we'll gladly shout you out!

Please make sure your PR is passing linting and testing before submitting. Run `make format`, `make lint` and `make test` from the root of the package you've modified to check this locally.
- [ ] **PR message**: ***Delete this entire checklist*** and replace with
- **Description:** a description of the change
- **Issue:** the issue # it fixes, if applicable
- **Dependencies:** any dependencies required for this change
- **Twitter handle:** if your PR gets announced, and you'd like a mention, we'll gladly shout you out!

See contribution guidelines for more information on how to write/run tests, lint, etc: https://python.langchain.com/docs/contributing/

If you're adding a new integration, please include:
- [ ] **Add tests and docs**: If you're adding a new integration, please include
1. a test for the integration, preferably unit tests that do not rely on network access,
2. an example notebook showing its use. It lives in `docs/docs/integrations` directory.

If no one reviews your PR within a few days, please @-mention one of @baskaryan, @eyurtsev, @hwchase17.
-->

- [ ] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See contribution guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
7 .github/actions/people/Dockerfile (vendored, Normal file)
@@ -0,0 +1,7 @@
FROM python:3.9

RUN pip install httpx PyGithub "pydantic==2.0.2" pydantic-settings "pyyaml>=5.3.1,<6.0.0"

COPY ./app /app

CMD ["python", "/app/main.py"]
11 .github/actions/people/action.yml (vendored, Normal file)
@@ -0,0 +1,11 @@
# Adapted from https://github.com/tiangolo/fastapi/blob/master/.github/actions/people/action.yml
name: "Generate LangChain People"
description: "Generate the data for the LangChain People page"
author: "Jacob Lee <jacob@langchain.dev>"
inputs:
  token:
    description: 'User token, to read the GitHub API. Can be passed in using {{ secrets.LANGCHAIN_PEOPLE_GITHUB_TOKEN }}'
    required: true
runs:
  using: 'docker'
  image: 'Dockerfile'
641 .github/actions/people/app/main.py (vendored, Normal file)
@@ -0,0 +1,641 @@
|
||||
# Adapted from https://github.com/tiangolo/fastapi/blob/master/.github/actions/people/app/main.py
|
||||
|
||||
import logging
|
||||
import subprocess
|
||||
import sys
|
||||
from collections import Counter
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Container, Dict, List, Set, Union
|
||||
|
||||
import httpx
|
||||
import yaml
|
||||
from github import Github
|
||||
from pydantic import BaseModel, SecretStr
|
||||
from pydantic_settings import BaseSettings
|
||||
|
||||
github_graphql_url = "https://api.github.com/graphql"
|
||||
questions_category_id = "DIC_kwDOIPDwls4CS6Ve"
|
||||
|
||||
# discussions_query = """
|
||||
# query Q($after: String, $category_id: ID) {
|
||||
# repository(name: "langchain", owner: "langchain-ai") {
|
||||
# discussions(first: 100, after: $after, categoryId: $category_id) {
|
||||
# edges {
|
||||
# cursor
|
||||
# node {
|
||||
# number
|
||||
# author {
|
||||
# login
|
||||
# avatarUrl
|
||||
# url
|
||||
# }
|
||||
# title
|
||||
# createdAt
|
||||
# comments(first: 100) {
|
||||
# nodes {
|
||||
# createdAt
|
||||
# author {
|
||||
# login
|
||||
# avatarUrl
|
||||
# url
|
||||
# }
|
||||
# isAnswer
|
||||
# replies(first: 10) {
|
||||
# nodes {
|
||||
# createdAt
|
||||
# author {
|
||||
# login
|
||||
# avatarUrl
|
||||
# url
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# """
|
||||
|
||||
# issues_query = """
|
||||
# query Q($after: String) {
|
||||
# repository(name: "langchain", owner: "langchain-ai") {
|
||||
# issues(first: 100, after: $after) {
|
||||
# edges {
|
||||
# cursor
|
||||
# node {
|
||||
# number
|
||||
# author {
|
||||
# login
|
||||
# avatarUrl
|
||||
# url
|
||||
# }
|
||||
# title
|
||||
# createdAt
|
||||
# state
|
||||
# comments(first: 100) {
|
||||
# nodes {
|
||||
# createdAt
|
||||
# author {
|
||||
# login
|
||||
# avatarUrl
|
||||
# url
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# """
|
||||
|
||||
prs_query = """
|
||||
query Q($after: String) {
|
||||
repository(name: "langchain", owner: "langchain-ai") {
|
||||
pullRequests(first: 100, after: $after, states: MERGED) {
|
||||
edges {
|
||||
cursor
|
||||
node {
|
||||
changedFiles
|
||||
additions
|
||||
deletions
|
||||
number
|
||||
labels(first: 100) {
|
||||
nodes {
|
||||
name
|
||||
}
|
||||
}
|
||||
author {
|
||||
login
|
||||
avatarUrl
|
||||
url
|
||||
... on User {
|
||||
twitterUsername
|
||||
}
|
||||
}
|
||||
title
|
||||
createdAt
|
||||
state
|
||||
reviews(first:100) {
|
||||
nodes {
|
||||
author {
|
||||
login
|
||||
avatarUrl
|
||||
url
|
||||
... on User {
|
||||
twitterUsername
|
||||
}
|
||||
}
|
||||
state
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
class Author(BaseModel):
|
||||
login: str
|
||||
avatarUrl: str
|
||||
url: str
|
||||
twitterUsername: Union[str, None] = None
|
||||
|
||||
|
||||
# Issues and Discussions
|
||||
|
||||
|
||||
class CommentsNode(BaseModel):
|
||||
createdAt: datetime
|
||||
author: Union[Author, None] = None
|
||||
|
||||
|
||||
class Replies(BaseModel):
|
||||
nodes: List[CommentsNode]
|
||||
|
||||
|
||||
class DiscussionsCommentsNode(CommentsNode):
|
||||
replies: Replies
|
||||
|
||||
|
||||
class Comments(BaseModel):
|
||||
nodes: List[CommentsNode]
|
||||
|
||||
|
||||
class DiscussionsComments(BaseModel):
|
||||
nodes: List[DiscussionsCommentsNode]
|
||||
|
||||
|
||||
class IssuesNode(BaseModel):
|
||||
number: int
|
||||
author: Union[Author, None] = None
|
||||
title: str
|
||||
createdAt: datetime
|
||||
state: str
|
||||
comments: Comments
|
||||
|
||||
|
||||
class DiscussionsNode(BaseModel):
|
||||
number: int
|
||||
author: Union[Author, None] = None
|
||||
title: str
|
||||
createdAt: datetime
|
||||
comments: DiscussionsComments
|
||||
|
||||
|
||||
class IssuesEdge(BaseModel):
|
||||
cursor: str
|
||||
node: IssuesNode
|
||||
|
||||
|
||||
class DiscussionsEdge(BaseModel):
|
||||
cursor: str
|
||||
node: DiscussionsNode
|
||||
|
||||
|
||||
class Issues(BaseModel):
|
||||
edges: List[IssuesEdge]
|
||||
|
||||
|
||||
class Discussions(BaseModel):
|
||||
edges: List[DiscussionsEdge]
|
||||
|
||||
|
||||
class IssuesRepository(BaseModel):
|
||||
issues: Issues
|
||||
|
||||
|
||||
class DiscussionsRepository(BaseModel):
|
||||
discussions: Discussions
|
||||
|
||||
|
||||
class IssuesResponseData(BaseModel):
|
||||
repository: IssuesRepository
|
||||
|
||||
|
||||
class DiscussionsResponseData(BaseModel):
|
||||
repository: DiscussionsRepository
|
||||
|
||||
|
||||
class IssuesResponse(BaseModel):
|
||||
data: IssuesResponseData
|
||||
|
||||
|
||||
class DiscussionsResponse(BaseModel):
|
||||
data: DiscussionsResponseData
|
||||
|
||||
|
||||
# PRs
|
||||
|
||||
|
||||
class LabelNode(BaseModel):
|
||||
name: str
|
||||
|
||||
|
||||
class Labels(BaseModel):
|
||||
nodes: List[LabelNode]
|
||||
|
||||
|
||||
class ReviewNode(BaseModel):
|
||||
author: Union[Author, None] = None
|
||||
state: str
|
||||
|
||||
|
||||
class Reviews(BaseModel):
|
||||
nodes: List[ReviewNode]
|
||||
|
||||
|
||||
class PullRequestNode(BaseModel):
|
||||
number: int
|
||||
labels: Labels
|
||||
author: Union[Author, None] = None
|
||||
changedFiles: int
|
||||
additions: int
|
||||
deletions: int
|
||||
title: str
|
||||
createdAt: datetime
|
||||
state: str
|
||||
reviews: Reviews
|
||||
# comments: Comments
|
||||
|
||||
|
||||
class PullRequestEdge(BaseModel):
|
||||
cursor: str
|
||||
node: PullRequestNode
|
||||
|
||||
|
||||
class PullRequests(BaseModel):
|
||||
edges: List[PullRequestEdge]
|
||||
|
||||
|
||||
class PRsRepository(BaseModel):
|
||||
pullRequests: PullRequests
|
||||
|
||||
|
||||
class PRsResponseData(BaseModel):
|
||||
repository: PRsRepository
|
||||
|
||||
|
||||
class PRsResponse(BaseModel):
|
||||
data: PRsResponseData
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
input_token: SecretStr
|
||||
github_repository: str
|
||||
httpx_timeout: int = 30
|
||||
|
||||
|
||||
def get_graphql_response(
|
||||
*,
|
||||
settings: Settings,
|
||||
query: str,
|
||||
after: Union[str, None] = None,
|
||||
category_id: Union[str, None] = None,
|
||||
) -> Dict[str, Any]:
|
||||
headers = {"Authorization": f"token {settings.input_token.get_secret_value()}"}
|
||||
# category_id is only used by one query, but GraphQL allows unused variables, so
|
||||
# keep it here for simplicity
|
||||
variables = {"after": after, "category_id": category_id}
|
||||
response = httpx.post(
|
||||
github_graphql_url,
|
||||
headers=headers,
|
||||
timeout=settings.httpx_timeout,
|
||||
json={"query": query, "variables": variables, "operationName": "Q"},
|
||||
)
|
||||
if response.status_code != 200:
|
||||
logging.error(
|
||||
f"Response was not 200, after: {after}, category_id: {category_id}"
|
||||
)
|
||||
logging.error(response.text)
|
||||
raise RuntimeError(response.text)
|
||||
data = response.json()
|
||||
if "errors" in data:
|
||||
logging.error(f"Errors in response, after: {after}, category_id: {category_id}")
|
||||
logging.error(data["errors"])
|
||||
logging.error(response.text)
|
||||
raise RuntimeError(response.text)
|
||||
return data
|
||||
|
||||
|
||||
# def get_graphql_issue_edges(*, settings: Settings, after: Union[str, None] = None):
|
||||
# data = get_graphql_response(settings=settings, query=issues_query, after=after)
|
||||
# graphql_response = IssuesResponse.model_validate(data)
|
||||
# return graphql_response.data.repository.issues.edges
|
||||
|
||||
|
||||
# def get_graphql_question_discussion_edges(
|
||||
# *,
|
||||
# settings: Settings,
|
||||
# after: Union[str, None] = None,
|
||||
# ):
|
||||
# data = get_graphql_response(
|
||||
# settings=settings,
|
||||
# query=discussions_query,
|
||||
# after=after,
|
||||
# category_id=questions_category_id,
|
||||
# )
|
||||
# graphql_response = DiscussionsResponse.model_validate(data)
|
||||
# return graphql_response.data.repository.discussions.edges
|
||||
|
||||
|
||||
def get_graphql_pr_edges(*, settings: Settings, after: Union[str, None] = None):
|
||||
if after is None:
|
||||
print("Querying PRs...")
|
||||
else:
|
||||
print(f"Querying PRs with cursor {after}...")
|
||||
data = get_graphql_response(
|
||||
settings=settings,
|
||||
query=prs_query,
|
||||
after=after
|
||||
)
|
||||
graphql_response = PRsResponse.model_validate(data)
|
||||
return graphql_response.data.repository.pullRequests.edges
|
||||
|
||||
|
||||
# def get_issues_experts(settings: Settings):
|
||||
# issue_nodes: List[IssuesNode] = []
|
||||
# issue_edges = get_graphql_issue_edges(settings=settings)
|
||||
|
||||
# while issue_edges:
|
||||
# for edge in issue_edges:
|
||||
# issue_nodes.append(edge.node)
|
||||
# last_edge = issue_edges[-1]
|
||||
# issue_edges = get_graphql_issue_edges(settings=settings, after=last_edge.cursor)
|
||||
|
||||
# commentors = Counter()
|
||||
# last_month_commentors = Counter()
|
||||
# authors: Dict[str, Author] = {}
|
||||
|
||||
# now = datetime.now(tz=timezone.utc)
|
||||
# one_month_ago = now - timedelta(days=30)
|
||||
|
||||
# for issue in issue_nodes:
|
||||
# issue_author_name = None
|
||||
# if issue.author:
|
||||
# authors[issue.author.login] = issue.author
|
||||
# issue_author_name = issue.author.login
|
||||
# issue_commentors = set()
|
||||
# for comment in issue.comments.nodes:
|
||||
# if comment.author:
|
||||
# authors[comment.author.login] = comment.author
|
||||
# if comment.author.login != issue_author_name:
|
||||
# issue_commentors.add(comment.author.login)
|
||||
# for author_name in issue_commentors:
|
||||
# commentors[author_name] += 1
|
||||
# if issue.createdAt > one_month_ago:
|
||||
# last_month_commentors[author_name] += 1
|
||||
|
||||
# return commentors, last_month_commentors, authors
|
||||
|
||||
|
||||
# def get_discussions_experts(settings: Settings):
|
||||
# discussion_nodes: List[DiscussionsNode] = []
|
||||
# discussion_edges = get_graphql_question_discussion_edges(settings=settings)
|
||||
|
||||
# while discussion_edges:
|
||||
# for discussion_edge in discussion_edges:
|
||||
# discussion_nodes.append(discussion_edge.node)
|
||||
# last_edge = discussion_edges[-1]
|
||||
# discussion_edges = get_graphql_question_discussion_edges(
|
||||
# settings=settings, after=last_edge.cursor
|
||||
# )
|
||||
|
||||
# commentors = Counter()
|
||||
# last_month_commentors = Counter()
|
||||
# authors: Dict[str, Author] = {}
|
||||
|
||||
# now = datetime.now(tz=timezone.utc)
|
||||
# one_month_ago = now - timedelta(days=30)
|
||||
|
||||
# for discussion in discussion_nodes:
|
||||
# discussion_author_name = None
|
||||
# if discussion.author:
|
||||
# authors[discussion.author.login] = discussion.author
|
||||
# discussion_author_name = discussion.author.login
|
||||
# discussion_commentors = set()
|
||||
# for comment in discussion.comments.nodes:
|
||||
# if comment.author:
|
||||
# authors[comment.author.login] = comment.author
|
||||
# if comment.author.login != discussion_author_name:
|
||||
# discussion_commentors.add(comment.author.login)
|
||||
# for reply in comment.replies.nodes:
|
||||
# if reply.author:
|
||||
# authors[reply.author.login] = reply.author
|
||||
# if reply.author.login != discussion_author_name:
|
||||
# discussion_commentors.add(reply.author.login)
|
||||
# for author_name in discussion_commentors:
|
||||
# commentors[author_name] += 1
|
||||
# if discussion.createdAt > one_month_ago:
|
||||
# last_month_commentors[author_name] += 1
|
||||
# return commentors, last_month_commentors, authors
|
||||
|
||||
|
||||
# def get_experts(settings: Settings):
|
||||
# (
|
||||
# discussions_commentors,
|
||||
# discussions_last_month_commentors,
|
||||
# discussions_authors,
|
||||
# ) = get_discussions_experts(settings=settings)
|
||||
# commentors = discussions_commentors
|
||||
# last_month_commentors = discussions_last_month_commentors
|
||||
# authors = {**discussions_authors}
|
||||
# return commentors, last_month_commentors, authors
|
||||
|
||||
|
||||
def _logistic(x, k):
|
||||
return x / (x + k)
|
||||
|
||||
|
||||
def get_contributors(settings: Settings):
|
||||
pr_nodes: List[PullRequestNode] = []
|
||||
pr_edges = get_graphql_pr_edges(settings=settings)
|
||||
|
||||
while pr_edges:
|
||||
for edge in pr_edges:
|
||||
pr_nodes.append(edge.node)
|
||||
last_edge = pr_edges[-1]
|
||||
pr_edges = get_graphql_pr_edges(settings=settings, after=last_edge.cursor)
|
||||
|
||||
contributors = Counter()
|
||||
contributor_scores = Counter()
|
||||
recent_contributor_scores = Counter()
|
||||
reviewers = Counter()
|
||||
authors: Dict[str, Author] = {}
|
||||
|
||||
for pr in pr_nodes:
|
||||
pr_reviewers: Set[str] = set()
|
||||
for review in pr.reviews.nodes:
|
||||
if review.author:
|
||||
authors[review.author.login] = review.author
|
||||
pr_reviewers.add(review.author.login)
|
||||
for reviewer in pr_reviewers:
|
||||
reviewers[reviewer] += 1
|
||||
if pr.author:
|
||||
authors[pr.author.login] = pr.author
|
||||
contributors[pr.author.login] += 1
|
||||
files_changed = pr.changedFiles
|
||||
lines_changed = pr.additions + pr.deletions
|
||||
score = _logistic(files_changed, 20) + _logistic(lines_changed, 100)
|
||||
contributor_scores[pr.author.login] += score
|
||||
three_months_ago = (datetime.now(timezone.utc) - timedelta(days=3*30))
|
||||
if pr.createdAt > three_months_ago:
|
||||
recent_contributor_scores[pr.author.login] += score
|
||||
return contributors, contributor_scores, recent_contributor_scores, reviewers, authors
|
||||
|
||||
|
||||
def get_top_users(
|
||||
*,
|
||||
counter: Counter,
|
||||
min_count: int,
|
||||
authors: Dict[str, Author],
|
||||
skip_users: Container[str],
|
||||
):
|
||||
users = []
|
||||
for commentor, count in counter.most_common():
|
||||
if commentor in skip_users:
|
||||
continue
|
||||
if count >= min_count:
|
||||
author = authors[commentor]
|
||||
users.append(
|
||||
{
|
||||
"login": commentor,
|
||||
"count": count,
|
||||
"avatarUrl": author.avatarUrl,
|
||||
"twitterUsername": author.twitterUsername,
|
||||
"url": author.url,
|
||||
}
|
||||
)
|
||||
return users
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
settings = Settings()
|
||||
logging.info(f"Using config: {settings.model_dump_json()}")
|
||||
g = Github(settings.input_token.get_secret_value())
|
||||
repo = g.get_repo(settings.github_repository)
|
||||
# question_commentors, question_last_month_commentors, question_authors = get_experts(
|
||||
# settings=settings
|
||||
# )
|
||||
contributors, contributor_scores, recent_contributor_scores, reviewers, pr_authors = get_contributors(
|
||||
settings=settings
|
||||
)
|
||||
# authors = {**question_authors, **pr_authors}
|
||||
authors = {**pr_authors}
|
||||
maintainers_logins = {
|
||||
"hwchase17",
|
||||
"agola11",
|
||||
"baskaryan",
|
||||
"hinthornw",
|
||||
"nfcampos",
|
||||
"efriis",
|
||||
"eyurtsev",
|
||||
"rlancemartin"
|
||||
}
|
||||
hidden_logins = {
|
||||
"dev2049",
|
||||
"vowelparrot",
|
||||
"obi1kenobi",
|
||||
"langchain-infra",
|
||||
"jacoblee93",
|
||||
"dqbd",
|
||||
"bracesproul",
|
||||
"akira",
|
||||
}
|
||||
bot_names = {"dosubot", "github-actions", "CodiumAI-Agent"}
|
||||
maintainers = []
|
||||
for login in maintainers_logins:
|
||||
user = authors[login]
|
||||
maintainers.append(
|
||||
{
|
||||
"login": login,
|
||||
"count": contributors[login], #+ question_commentors[login],
|
||||
"avatarUrl": user.avatarUrl,
|
||||
"twitterUsername": user.twitterUsername,
|
||||
"url": user.url,
|
||||
}
|
||||
)
|
||||
|
||||
# min_count_expert = 10
|
||||
# min_count_last_month = 3
|
||||
min_score_contributor = 1
|
||||
min_count_reviewer = 5
|
||||
skip_users = maintainers_logins | bot_names | hidden_logins
|
||||
# experts = get_top_users(
|
||||
# counter=question_commentors,
|
||||
# min_count=min_count_expert,
|
||||
# authors=authors,
|
||||
# skip_users=skip_users,
|
||||
# )
|
||||
# last_month_active = get_top_users(
|
||||
# counter=question_last_month_commentors,
|
||||
# min_count=min_count_last_month,
|
||||
# authors=authors,
|
||||
# skip_users=skip_users,
|
||||
# )
|
||||
top_recent_contributors = get_top_users(
|
||||
counter=recent_contributor_scores,
|
||||
min_count=min_score_contributor,
|
||||
authors=authors,
|
||||
skip_users=skip_users,
|
||||
)
|
||||
top_contributors = get_top_users(
|
||||
counter=contributor_scores,
|
||||
min_count=min_score_contributor,
|
||||
authors=authors,
|
||||
skip_users=skip_users,
|
||||
)
|
||||
top_reviewers = get_top_users(
|
||||
counter=reviewers,
|
||||
min_count=min_count_reviewer,
|
||||
authors=authors,
|
||||
skip_users=skip_users,
|
||||
)
|
||||
|
||||
people = {
|
||||
"maintainers": maintainers,
|
||||
# "experts": experts,
|
||||
# "last_month_active": last_month_active,
|
||||
"top_recent_contributors": top_recent_contributors,
|
||||
"top_contributors": top_contributors,
|
||||
"top_reviewers": top_reviewers,
|
||||
}
|
||||
people_path = Path("./docs/data/people.yml")
|
||||
people_old_content = people_path.read_text(encoding="utf-8")
|
||||
new_people_content = yaml.dump(
|
||||
people, sort_keys=False, width=200, allow_unicode=True
|
||||
)
|
||||
if (
|
||||
people_old_content == new_people_content
|
||||
):
|
||||
logging.info("The LangChain People data hasn't changed, finishing.")
|
||||
sys.exit(0)
|
||||
people_path.write_text(new_people_content, encoding="utf-8")
|
||||
logging.info("Setting up GitHub Actions git user")
|
||||
subprocess.run(["git", "config", "user.name", "github-actions"], check=True)
|
||||
subprocess.run(
|
||||
["git", "config", "user.email", "github-actions@github.com"], check=True
|
||||
)
|
||||
branch_name = "langchain/langchain-people"
|
||||
logging.info(f"Creating a new branch {branch_name}")
|
||||
subprocess.run(["git", "checkout", "-B", branch_name], check=True)
|
||||
logging.info("Adding updated file")
|
||||
subprocess.run(
|
||||
["git", "add", str(people_path)], check=True
|
||||
)
|
||||
logging.info("Committing updated file")
|
||||
message = "👥 Update LangChain people data"
|
||||
result = subprocess.run(["git", "commit", "-m", message], check=True)
|
||||
logging.info("Pushing branch")
|
||||
subprocess.run(["git", "push", "origin", branch_name, "-f"], check=True)
|
||||
logging.info("Creating PR")
|
||||
pr = repo.create_pull(title=message, body=message, base="master", head=branch_name)
|
||||
logging.info(f"Created PR: {pr.number}")
|
||||
logging.info("Finished")
|
||||
72 .github/scripts/check_diff.py (vendored)
@@ -1,17 +1,24 @@
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
from typing import Dict
|
||||
|
||||
LANGCHAIN_DIRS = {
|
||||
LANGCHAIN_DIRS = [
|
||||
"libs/core",
|
||||
"libs/text-splitters",
|
||||
"libs/community",
|
||||
"libs/langchain",
|
||||
"libs/experimental",
|
||||
"libs/community",
|
||||
}
|
||||
]
|
||||
|
||||
if __name__ == "__main__":
|
||||
files = sys.argv[1:]
|
||||
dirs_to_run = set()
|
||||
|
||||
dirs_to_run: Dict[str, set] = {
|
||||
"lint": set(),
|
||||
"test": set(),
|
||||
"extended-test": set(),
|
||||
}
|
||||
|
||||
if len(files) == 300:
|
||||
# max diff length is 300 files - there are likely files missing
|
||||
@@ -24,27 +31,46 @@ if __name__ == "__main__":
|
||||
".github/workflows",
|
||||
".github/tools",
|
||||
".github/actions",
|
||||
"libs/core",
|
||||
".github/scripts/check_diff.py",
|
||||
)
|
||||
):
|
||||
dirs_to_run.update(LANGCHAIN_DIRS)
|
||||
elif "libs/community" in file:
|
||||
dirs_to_run.update(
|
||||
("libs/community", "libs/langchain", "libs/experimental")
|
||||
)
|
||||
elif "libs/partners" in file:
|
||||
# add all LANGCHAIN_DIRS for infra changes
|
||||
dirs_to_run["extended-test"].update(LANGCHAIN_DIRS)
|
||||
dirs_to_run["lint"].add(".")
|
||||
|
||||
if any(file.startswith(dir_) for dir_ in LANGCHAIN_DIRS):
|
||||
# add that dir and all dirs after in LANGCHAIN_DIRS
|
||||
# for extended testing
|
||||
found = False
|
||||
for dir_ in LANGCHAIN_DIRS:
|
||||
if file.startswith(dir_):
|
||||
found = True
|
||||
if found:
|
||||
dirs_to_run["extended-test"].add(dir_)
|
||||
elif file.startswith("libs/partners"):
|
||||
partner_dir = file.split("/")[2]
|
||||
if os.path.isdir(f"libs/partners/{partner_dir}"):
|
||||
dirs_to_run.add(f"libs/partners/{partner_dir}")
|
||||
# Skip if the directory was deleted
|
||||
elif "libs/langchain" in file:
|
||||
dirs_to_run.update(("libs/langchain", "libs/experimental"))
|
||||
elif "libs/experimental" in file:
|
||||
dirs_to_run.add("libs/experimental")
|
||||
if os.path.isdir(f"libs/partners/{partner_dir}") and [
|
||||
filename
|
||||
for filename in os.listdir(f"libs/partners/{partner_dir}")
|
||||
if not filename.startswith(".")
|
||||
] != ["README.md"]:
|
||||
dirs_to_run["test"].add(f"libs/partners/{partner_dir}")
|
||||
# Skip if the directory was deleted or is just a tombstone readme
|
||||
elif file.startswith("libs/"):
|
||||
dirs_to_run.update(LANGCHAIN_DIRS)
|
||||
else:
|
||||
pass
|
||||
json_output = json.dumps(list(dirs_to_run))
|
||||
print(f"dirs-to-run={json_output}") # noqa: T201
|
||||
raise ValueError(
|
||||
f"Unknown lib: {file}. check_diff.py likely needs "
|
||||
"an update for this new library!"
|
||||
)
|
||||
elif any(file.startswith(p) for p in ["docs/", "templates/", "cookbook/"]):
|
||||
dirs_to_run["lint"].add(".")
|
||||
|
||||
outputs = {
|
||||
"dirs-to-lint": list(
|
||||
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"]
|
||||
),
|
||||
"dirs-to-test": list(dirs_to_run["test"] | dirs_to_run["extended-test"]),
|
||||
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
|
||||
}
|
||||
for key, value in outputs.items():
|
||||
json_output = json.dumps(value)
|
||||
print(f"{key}={json_output}") # noqa: T201
|
||||
|
||||
2 .github/scripts/get_min_versions.py (vendored)
@@ -4,7 +4,7 @@ import tomllib
from packaging.version import parse as parse_version
import re

MIN_VERSION_LIBS = ["langchain-core", "langchain-community", "langchain"]
MIN_VERSION_LIBS = ["langchain-core", "langchain-community", "langchain", "langchain-text-splitters"]


def get_min_version(version: str) -> str:
110 .github/workflows/_all_ci.yml (vendored)
@@ -1,110 +0,0 @@
|
||||
---
|
||||
name: langchain CI
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
working-directory:
|
||||
required: true
|
||||
type: string
|
||||
description: "From which folder this pipeline executes"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
working-directory:
|
||||
required: true
|
||||
type: choice
|
||||
default: 'libs/langchain'
|
||||
options:
|
||||
- libs/langchain
|
||||
- libs/core
|
||||
- libs/experimental
|
||||
- libs/community
|
||||
|
||||
|
||||
# If another push to the same PR or branch happens while this workflow is still running,
|
||||
# cancel the earlier run in favor of the next run.
|
||||
#
|
||||
# There's no point in testing an outdated version of the code. GitHub only allows
|
||||
# a limited number of job runners to be active at the same time, so it's better to cancel
|
||||
# pointless jobs early so that more useful jobs can run sooner.
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.working-directory }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.7.1"
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
name: "-"
|
||||
uses: ./.github/workflows/_lint.yml
|
||||
with:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
test:
|
||||
name: "-"
|
||||
uses: ./.github/workflows/_test.yml
|
||||
with:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
compile-integration-tests:
|
||||
name: "-"
|
||||
uses: ./.github/workflows/_compile_integration_test.yml
|
||||
with:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
dependencies:
|
||||
name: "-"
|
||||
uses: ./.github/workflows/_dependencies.yml
|
||||
with:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
extended-tests:
|
||||
name: "make extended_tests #${{ matrix.python-version }}"
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- "3.8"
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
if: ${{ ! startsWith(inputs.working-directory, 'libs/partners/') }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ inputs.working-directory }}
|
||||
cache-key: extended
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Running extended tests, installing dependencies with poetry..."
|
||||
poetry install -E extended_testing --with test
|
||||
|
||||
- name: Run extended tests
|
||||
run: make extended_tests
|
||||
|
||||
- name: Ensure the tests did not create any additional files
|
||||
shell: bash
|
||||
run: |
|
||||
set -eu
|
||||
|
||||
STATUS="$(git status)"
|
||||
echo "$STATUS"
|
||||
|
||||
# grep will exit non-zero if the target message isn't found,
|
||||
# and `set -e` above will cause the step to fail.
|
||||
echo "$STATUS" | grep 'nothing to commit, working tree clean'
|
||||
4 .github/workflows/_dependencies.yml (vendored)
@@ -63,6 +63,8 @@ jobs:
- name: Install the opposite major version of pydantic
# If normal tests use pydantic v1, here we'll use v2, and vice versa.
shell: bash
# airbyte currently doesn't support pydantic v2
if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
run: |
# Determine the major part of pydantic version
REGULAR_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
@@ -97,6 +99,8 @@ jobs:
fi
echo "Found pydantic version ${CURRENT_VERSION}, as expected"
- name: Run pydantic compatibility tests
# airbyte currently doesn't support pydantic v2
if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
shell: bash
run: make test
11 .github/workflows/_integration_test.yml (vendored)
@@ -52,6 +52,7 @@ jobs:
- name: Run integration tests
shell: bash
env:
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
@@ -62,8 +63,18 @@ jobs:
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }}
ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}
ASTRA_DB_KEYSPACE: ${{ secrets.ASTRA_DB_KEYSPACE }}
ES_URL: ${{ secrets.ES_URL }}
ES_CLOUD_ID: ${{ secrets.ES_CLOUD_ID }}
ES_API_KEY: ${{ secrets.ES_API_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
run: |
make integration_tests
18 .github/workflows/_release.yml (vendored)
@@ -166,18 +166,36 @@ jobs:
- name: Run integration tests
if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }}
env:
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }}
ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}
ASTRA_DB_KEYSPACE: ${{ secrets.ASTRA_DB_KEYSPACE }}
ES_URL: ${{ secrets.ES_URL }}
ES_CLOUD_ID: ${{ secrets.ES_CLOUD_ID }}
ES_API_KEY: ${{ secrets.ES_API_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
run: make integration_tests
working-directory: ${{ inputs.working-directory }}
78 .github/workflows/api_doc_build.yml (vendored, Normal file)
@@ -0,0 +1,78 @@
|
||||
name: API docs build
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 13 * * *'
|
||||
env:
|
||||
POETRY_VERSION: "1.7.1"
|
||||
PYTHON_VERSION: "3.10"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: bagatur/api_docs_build
|
||||
path: langchain
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-google
|
||||
path: langchain-google
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: langchain-ai/langchain-datastax
|
||||
path: langchain-datastax
|
||||
|
||||
- name: Set Git config
|
||||
working-directory: langchain
|
||||
run: |
|
||||
git config --local user.email "actions@github.com"
|
||||
git config --local user.name "Github Actions"
|
||||
|
||||
- name: Merge master
|
||||
working-directory: langchain
|
||||
run: |
|
||||
git fetch origin master
|
||||
git merge origin/master -m "Merge master" --allow-unrelated-histories -X theirs
|
||||
|
||||
- name: Move google libs
|
||||
run: |
|
||||
rm -rf \
|
||||
langchain/libs/partners/google-genai \
|
||||
langchain/libs/partners/google-vertexai \
|
||||
langchain/libs/partners/astradb
|
||||
mv langchain-google/libs/genai langchain/libs/partners/google-genai
|
||||
mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
|
||||
mv langchain-datastax/libs/astradb langchain/libs/partners/astradb
|
||||
|
||||
- name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./langchain/.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
cache-key: api-docs
|
||||
working-directory: langchain
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: langchain
|
||||
run: |
|
||||
poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
|
||||
poetry run python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
|
||||
# skip airbyte and ibm due to pandas dependency issue
|
||||
poetry run python -m pip install $(ls ./libs/partners | grep -vE "airbyte|ibm" | xargs -I {} echo "./libs/partners/{}")
|
||||
poetry run python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
|
||||
|
||||
- name: Build docs
|
||||
working-directory: langchain
|
||||
run: |
|
||||
poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
|
||||
poetry run python docs/api_reference/create_api_rst.py
|
||||
poetry run python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference api_reference_build/html -j auto
|
||||
|
||||
# https://github.com/marketplace/actions/add-commit
|
||||
- uses: EndBug/add-and-commit@v9
|
||||
with:
|
||||
cwd: langchain
|
||||
message: 'Update API docs build'
|
||||
115 .github/workflows/check_diffs.yml (vendored)
@@ -16,6 +16,9 @@ concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
POETRY_VERSION: "1.7.1"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -30,15 +33,119 @@ jobs:
|
||||
run: |
|
||||
python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
|
||||
outputs:
|
||||
dirs-to-run: ${{ steps.set-matrix.outputs.dirs-to-run }}
|
||||
ci:
|
||||
dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
|
||||
dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
|
||||
dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
|
||||
lint:
|
||||
name: cd ${{ matrix.working-directory }}
|
||||
needs: [ build ]
|
||||
if: ${{ needs.build.outputs.dirs-to-lint != '[]' }}
|
||||
strategy:
|
||||
matrix:
|
||||
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-run) }}
|
||||
uses: ./.github/workflows/_all_ci.yml
|
||||
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-lint) }}
|
||||
uses: ./.github/workflows/_lint.yml
|
||||
with:
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
test:
|
||||
name: cd ${{ matrix.working-directory }}
|
||||
needs: [ build ]
|
||||
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
|
||||
strategy:
|
||||
matrix:
|
||||
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
|
||||
uses: ./.github/workflows/_test.yml
|
||||
with:
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
compile-integration-tests:
|
||||
name: cd ${{ matrix.working-directory }}
|
||||
needs: [ build ]
|
||||
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
|
||||
strategy:
|
||||
matrix:
|
||||
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
|
||||
uses: ./.github/workflows/_compile_integration_test.yml
|
||||
with:
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
dependencies:
|
||||
name: cd ${{ matrix.working-directory }}
|
||||
needs: [ build ]
|
||||
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
|
||||
strategy:
|
||||
matrix:
|
||||
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
|
||||
uses: ./.github/workflows/_dependencies.yml
|
||||
with:
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
secrets: inherit
|
||||
|
||||
extended-tests:
|
||||
name: "cd ${{ matrix.working-directory }} / make extended_tests #${{ matrix.python-version }}"
|
||||
needs: [ build ]
|
||||
if: ${{ needs.build.outputs.dirs-to-extended-test != '[]' }}
|
||||
strategy:
|
||||
matrix:
|
||||
# note different variable for extended test dirs
|
||||
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-extended-test) }}
|
||||
python-version:
|
||||
- "3.8"
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
- "3.11"
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
|
||||
uses: "./.github/actions/poetry_setup"
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
poetry-version: ${{ env.POETRY_VERSION }}
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
cache-key: extended
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Running extended tests, installing dependencies with poetry..."
|
||||
poetry install -E extended_testing --with test
|
||||
|
||||
- name: Run extended tests
|
||||
run: make extended_tests
|
||||
|
||||
- name: Ensure the tests did not create any additional files
|
||||
shell: bash
|
||||
run: |
|
||||
set -eu
|
||||
|
||||
STATUS="$(git status)"
|
||||
echo "$STATUS"
|
||||
|
||||
# grep will exit non-zero if the target message isn't found,
|
||||
# and `set -e` above will cause the step to fail.
|
||||
echo "$STATUS" | grep 'nothing to commit, working tree clean'
|
||||
ci_success:
|
||||
name: "CI Success"
|
||||
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests]
|
||||
if: |
|
||||
always()
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
JOBS_JSON: ${{ toJSON(needs) }}
|
||||
RESULTS_JSON: ${{ toJSON(needs.*.result) }}
|
||||
EXIT_CODE: ${{!contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && '0' || '1'}}
|
||||
steps:
|
||||
- name: "CI Success"
|
||||
run: |
|
||||
echo $JOBS_JSON
|
||||
echo $RESULTS_JSON
|
||||
echo "Exiting with $EXIT_CODE"
|
||||
exit $EXIT_CODE
|
||||
|
||||
3
.github/workflows/codespell.yml
vendored
@@ -32,5 +32,6 @@ jobs:
|
||||
- name: Codespell
|
||||
uses: codespell-project/actions-codespell@v2
|
||||
with:
|
||||
skip: guide_imports.json
|
||||
skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock
|
||||
ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }}
|
||||
exclude_file: libs/community/langchain_community/llms/yuan2.py
|
||||
|
||||
37
.github/workflows/doc_lint.yml
vendored
@@ -1,37 +0,0 @@
|
||||
---
|
||||
name: CI / cd .
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
paths:
|
||||
- 'docs/**'
|
||||
- 'templates/**'
|
||||
- 'cookbook/**'
|
||||
- '.github/workflows/_lint.yml'
|
||||
- '.github/workflows/doc_lint.yml'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
check:
|
||||
name: Check for "from langchain import x" imports
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run import check
|
||||
run: |
|
||||
# We should not encourage imports directly from main init file
|
||||
# Except for hub
|
||||
git grep 'from langchain import' {docs/docs,templates,cookbook} | grep -vE 'from langchain import (hub)' && exit 1 || exit 0
|
||||
|
||||
lint:
|
||||
name: "-"
|
||||
uses:
|
||||
./.github/workflows/_lint.yml
|
||||
with:
|
||||
working-directory: "."
|
||||
secrets: inherit
|
||||
36
.github/workflows/people.yml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: LangChain People
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 14 1 * *"
|
||||
push:
|
||||
branches: [jacob/people]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
debug_enabled:
|
||||
description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
|
||||
required: false
|
||||
default: 'false'
|
||||
|
||||
jobs:
|
||||
langchain-people:
|
||||
if: github.repository_owner == 'langchain-ai'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Dump GitHub context
|
||||
env:
|
||||
GITHUB_CONTEXT: ${{ toJson(github) }}
|
||||
run: echo "$GITHUB_CONTEXT"
|
||||
- uses: actions/checkout@v4
|
||||
# Ref: https://github.com/actions/runner/issues/2033
|
||||
- name: Fix git safe.directory in container
|
||||
run: mkdir -p /home/runner/work/_temp/_github_home && printf "[safe]\n\tdirectory = /github/workspace" > /home/runner/work/_temp/_github_home/.gitconfig
|
||||
# Allow debugging with tmate
|
||||
- name: Setup tmate session
|
||||
uses: mxschmitt/action-tmate@v3
|
||||
if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled == 'true' }}
|
||||
with:
|
||||
limit-access-to-actor: true
|
||||
- uses: ./.github/actions/people
|
||||
with:
|
||||
token: ${{ secrets.LANGCHAIN_PEOPLE_GITHUB_TOKEN }}
|
||||
9
.gitignore
vendored
@@ -115,13 +115,10 @@ celerybeat.pid
|
||||
# Environments
|
||||
.env
|
||||
.envrc
|
||||
.venv
|
||||
.venvs
|
||||
.venv*
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
@@ -177,4 +174,6 @@ docs/docs/build
|
||||
docs/docs/node_modules
|
||||
docs/docs/yarn.lock
|
||||
_dist
|
||||
docs/docs/templates
|
||||
docs/docs/templates
|
||||
|
||||
prof
|
||||
|
||||
@@ -13,15 +13,8 @@ build:
|
||||
tools:
|
||||
python: "3.11"
|
||||
commands:
|
||||
- python -m virtualenv $READTHEDOCS_VIRTUALENV_PATH
|
||||
- python -m pip install --upgrade --no-cache-dir pip setuptools
|
||||
- python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
|
||||
- python -m pip install ./libs/partners/*
|
||||
- python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
|
||||
- python docs/api_reference/create_api_rst.py
|
||||
- cat docs/api_reference/conf.py
|
||||
- python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference $READTHEDOCS_OUTPUT/html -j auto
|
||||
|
||||
- mkdir -p $READTHEDOCS_OUTPUT
|
||||
- cp -r api_reference_build/* $READTHEDOCS_OUTPUT
|
||||
# Build documentation in the docs/ directory with Sphinx
|
||||
sphinx:
|
||||
configuration: docs/api_reference/conf.py
|
||||
|
||||
9
Makefile
@@ -15,7 +15,12 @@ docs_build:
|
||||
docs/.local_build.sh
|
||||
|
||||
docs_clean:
|
||||
rm -r _dist
|
||||
@if [ -d _dist ]; then \
|
||||
rm -r _dist; \
|
||||
echo "Directory _dist has been cleaned."; \
|
||||
else \
|
||||
echo "Nothing to clean."; \
|
||||
fi
|
||||
|
||||
docs_linkcheck:
|
||||
poetry run linkchecker _dist/docs/ --ignore-url node_modules
|
||||
@@ -45,11 +50,13 @@ lint lint_package lint_tests:
|
||||
poetry run ruff docs templates cookbook
|
||||
poetry run ruff format docs templates cookbook --diff
|
||||
poetry run ruff --select I docs templates cookbook
|
||||
git grep 'from langchain import' docs/docs templates cookbook | grep -vE 'from langchain import (hub)' && exit 1 || exit 0
|
||||
|
||||
format format_diff:
|
||||
poetry run ruff format docs templates cookbook
|
||||
poetry run ruff --select I --fix docs templates cookbook
|
||||
|
||||
|
||||
######################
|
||||
# HELP
|
||||
######################
|
||||
|
||||
@@ -18,7 +18,7 @@ Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langc
|
||||
|
||||
To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
|
||||
[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
|
||||
Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to get off the waitlist or speak with our sales team.
|
||||
Fill out [this form](https://www.langchain.com/contact-sales) to speak with our sales team.
|
||||
|
||||
## Quick Install
|
||||
|
||||
|
||||
932
cookbook/Gemma_LangChain.ipynb
Normal file
@@ -0,0 +1,932 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "BYejgj8Zf-LG",
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"## Getting started with LangChain and Gemma, running locally or in the Cloud"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "2IxjMb9-jIJ8"
|
||||
},
|
||||
"source": [
|
||||
"### Installing dependencies"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"executionInfo": {
|
||||
"elapsed": 9436,
|
||||
"status": "ok",
|
||||
"timestamp": 1708975187360,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "XZaTsXfcheTF",
|
||||
"outputId": "eb21d603-d824-46c5-f99f-087fb2f618b1",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install --upgrade langchain langchain-google-vertexai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "IXmAujvC3Kwp"
|
||||
},
|
||||
"source": [
|
||||
"### Running the model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "CI8Elyc5gBQF"
|
||||
},
|
||||
"source": [
|
||||
"Go to the VertexAI Model Garden on Google Cloud [console](https://pantheon.corp.google.com/vertex-ai/publishers/google/model-garden/335), and deploy the desired version of Gemma to VertexAI. It will take a few minutes, and after the endpoint it ready, you need to copy its number."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"id": "gv1j8FrVftsC"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Basic parameters\n",
|
||||
"project: str = \"PUT_YOUR_PROJECT_ID_HERE\" # @param {type:\"string\"}\n",
|
||||
"endpoint_id: str = \"PUT_YOUR_ENDPOINT_ID_HERE\" # @param {type:\"string\"}\n",
|
||||
"location: str = \"PUT_YOUR_ENDPOINT_LOCAtION_HERE\" # @param {type:\"string\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 3,
|
||||
"status": "ok",
|
||||
"timestamp": 1708975440503,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "bhIHsFGYjtFt",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 17:15:10.457149: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
|
||||
"2024-02-27 17:15:10.508925: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
|
||||
"2024-02-27 17:15:10.508957: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
|
||||
"2024-02-27 17:15:10.510289: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
|
||||
"2024-02-27 17:15:10.518898: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
|
||||
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import (\n",
|
||||
" GemmaChatVertexAIModelGarden,\n",
|
||||
" GemmaVertexAIModelGarden,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 351,
|
||||
"status": "ok",
|
||||
"timestamp": 1708975440852,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "WJv-UVWwh0lk",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = GemmaVertexAIModelGarden(\n",
|
||||
" endpoint_id=endpoint_id,\n",
|
||||
" project=project,\n",
|
||||
" location=location,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"executionInfo": {
|
||||
"elapsed": 714,
|
||||
"status": "ok",
|
||||
"timestamp": 1708975441564,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "6kM7cEFdiN9h",
|
||||
"outputId": "fb420c56-5614-4745-cda8-0ee450a3e539",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prompt:\n",
|
||||
"What is the meaning of life?\n",
|
||||
"Output:\n",
|
||||
" Who am I? Why do I exist? These are questions I have struggled with\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = llm.invoke(\"What is the meaning of life?\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "zzep9nfmuUcO"
|
||||
},
|
||||
"source": [
|
||||
"We can also use Gemma as a multi-turn chat model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"executionInfo": {
|
||||
"elapsed": 964,
|
||||
"status": "ok",
|
||||
"timestamp": 1708976298189,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "8tPHoM5XiZOl",
|
||||
"outputId": "7b8fb652-9aed-47b0-c096-aa1abfc3a2a9",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='Prompt:\\n<start_of_turn>user\\nHow much is 2+2?<end_of_turn>\\n<start_of_turn>model\\nOutput:\\n8-years old.<end_of_turn>\\n\\n<start_of'\n",
|
||||
"content='Prompt:\\n<start_of_turn>user\\nHow much is 2+2?<end_of_turn>\\n<start_of_turn>model\\nPrompt:\\n<start_of_turn>user\\nHow much is 2+2?<end_of_turn>\\n<start_of_turn>model\\nOutput:\\n8-years old.<end_of_turn>\\n\\n<start_of<end_of_turn>\\n<start_of_turn>user\\nHow much is 3+3?<end_of_turn>\\n<start_of_turn>model\\nOutput:\\nOutput:\\n3-years old.<end_of_turn>\\n\\n<'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"llm = GemmaChatVertexAIModelGarden(\n",
|
||||
" endpoint_id=endpoint_id,\n",
|
||||
" project=project,\n",
|
||||
" location=location,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"message1 = HumanMessage(content=\"How much is 2+2?\")\n",
|
||||
"answer1 = llm.invoke([message1])\n",
|
||||
"print(answer1)\n",
|
||||
"\n",
|
||||
"message2 = HumanMessage(content=\"How much is 3+3?\")\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2])\n",
|
||||
"\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can post-process response to avoid repetitions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content='Output:\\n<<humming>>: 2+2 = 4.\\n<end'\n",
|
||||
"content='Output:\\nOutput:\\n<<humming>>: 3+3 = 6.'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"answer1 = llm.invoke([message1], parse_response=True)\n",
|
||||
"print(answer1)\n",
|
||||
"\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], parse_response=True)\n",
|
||||
"\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "VEfjqo7fjARR"
|
||||
},
|
||||
"source": [
|
||||
"## Running Gemma locally from Kaggle"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "gVW8QDzHu7TA"
|
||||
},
|
||||
"source": [
|
||||
"In order to run Gemma locally, you can download it from Kaggle first. In order to do this, you'll need to login into the Kaggle platform, create a API key and download a `kaggle.json` Read more about Kaggle auth [here](https://www.kaggle.com/docs/api)."
|
||||
]
|
||||
},
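As an aside (not part of the original notebook): the Kaggle API can also read credentials from environment variables instead of a copied `kaggle.json`. A minimal sketch, assuming the standard KAGGLE_USERNAME / KAGGLE_KEY variables; the values below are placeholders:

import os

# Assumption: the Kaggle client honours these environment variables,
# so they can replace copying kaggle.json into ~/.kaggle/.
os.environ["KAGGLE_USERNAME"] = "your_kaggle_username"  # placeholder
os.environ["KAGGLE_KEY"] = "your_kaggle_api_key"  # placeholder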
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "S1EsXQ3XvZkQ"
|
||||
},
|
||||
"source": [
|
||||
"### Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 335,
|
||||
"status": "ok",
|
||||
"timestamp": 1708976305471,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "p8SMwpKRvbef",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/opt/conda/lib/python3.10/pty.py:89: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.\n",
|
||||
" pid, fd = os.forkpty()\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/kaggle.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 7802,
|
||||
"status": "ok",
|
||||
"timestamp": 1708976363010,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "Yr679aePv9Fq",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/opt/conda/lib/python3.10/pty.py:89: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.\n",
|
||||
" pid, fd = os.forkpty()\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
|
||||
"tensorstore 0.1.54 requires ml-dtypes>=0.3.1, but you have ml-dtypes 0.2.0 which is incompatible.\u001b[0m\u001b[31m\n",
|
||||
"\u001b[0m"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install keras>=3 keras_nlp"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "E9zn8nYpv3QZ"
|
||||
},
|
||||
"source": [
|
||||
"### Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 8536,
|
||||
"status": "ok",
|
||||
"timestamp": 1708976601206,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "0LFRmY8TjCkI",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 16:38:40.797559: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
|
||||
"2024-02-27 16:38:40.848444: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
|
||||
"2024-02-27 16:38:40.848478: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
|
||||
"2024-02-27 16:38:40.849728: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
|
||||
"2024-02-27 16:38:40.857936: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
|
||||
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import GemmaLocalKaggle"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "v-o7oXVavdMQ"
|
||||
},
|
||||
"source": [
|
||||
"You can specify the keras backend (by default it's `tensorflow`, but you can change it be `jax` or `torch`)."
|
||||
]
|
||||
},
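A short aside on how the backend choice takes effect (an assumption based on Keras 3 conventions, not code from the notebook): the backend can also be selected globally through the KERAS_BACKEND environment variable, which has to be set before keras is imported.

import os

# Hedged sketch: pick the Keras 3 backend before keras is imported;
# "jax", "tensorflow" and "torch" are the accepted values.
os.environ["KERAS_BACKEND"] = "jax"

import keras  # imported only after the backend has been chosen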
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 9,
|
||||
"status": "ok",
|
||||
"timestamp": 1708976601206,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "vvTUH8DNj5SF",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Basic parameters\n",
|
||||
"keras_backend: str = \"jax\" # @param {type:\"string\"}\n",
|
||||
"model_name: str = \"gemma_2b_en\" # @param {type:\"string\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 40836,
|
||||
"status": "ok",
|
||||
"timestamp": 1708976761257,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "YOmrqxo5kHXK",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 16:23:14.661164: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 20549 MB memory: -> device: 0, name: NVIDIA L4, pci bus id: 0000:00:03.0, compute capability: 8.9\n",
|
||||
"normalizer.cc(51) LOG(INFO) precompiled_charsmap is empty. use identity normalization.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = GemmaLocalKaggle(model_name=model_name, keras_backend=keras_backend)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"id": "Zu6yPDUgkQtQ",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"W0000 00:00:1709051129.518076 774855 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"What is the meaning of life?\n",
|
||||
"\n",
|
||||
"The question is one of the most important questions in the world.\n",
|
||||
"\n",
|
||||
"It’s the question that has\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = llm.invoke(\"What is the meaning of life?\", max_tokens=30)\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ChatModel"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "MSctpRE4u43N"
|
||||
},
|
||||
"source": [
|
||||
"Same as above, using Gemma locally as a multi-turn chat model. You might need to re-start the notebook and clean your GPU memory in order to avoid OOM errors:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 16:58:22.331067: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
|
||||
"2024-02-27 16:58:22.382948: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
|
||||
"2024-02-27 16:58:22.382978: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
|
||||
"2024-02-27 16:58:22.384312: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
|
||||
"2024-02-27 16:58:22.392767: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
|
||||
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import GemmaChatLocalKaggle"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Basic parameters\n",
|
||||
"keras_backend: str = \"jax\" # @param {type:\"string\"}\n",
|
||||
"model_name: str = \"gemma_2b_en\" # @param {type:\"string\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 16:58:29.001922: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 20549 MB memory: -> device: 0, name: NVIDIA L4, pci bus id: 0000:00:03.0, compute capability: 8.9\n",
|
||||
"normalizer.cc(51) LOG(INFO) precompiled_charsmap is empty. use identity normalization.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = GemmaChatLocalKaggle(model_name=model_name, keras_backend=keras_backend)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"executionInfo": {
|
||||
"elapsed": 3,
|
||||
"status": "aborted",
|
||||
"timestamp": 1708976382957,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": -60
|
||||
},
|
||||
"id": "JrJmvZqwwLqj"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 16:58:49.848412: I external/local_xla/xla/service/service.cc:168] XLA service 0x55adc0cf2c10 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n",
|
||||
"2024-02-27 16:58:49.848458: I external/local_xla/xla/service/service.cc:176] StreamExecutor device (0): NVIDIA L4, Compute Capability 8.9\n",
|
||||
"2024-02-27 16:58:50.116614: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
|
||||
"2024-02-27 16:58:54.389324: I external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:454] Loaded cuDNN version 8900\n",
|
||||
"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
|
||||
"I0000 00:00:1709053145.225207 784891 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.\n",
|
||||
"W0000 00:00:1709053145.284227 784891 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n Tampoco\\nI'm a model.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"message1 = HumanMessage(content=\"Hi! Who are you?\")\n",
|
||||
"answer1 = llm.invoke([message1], max_tokens=30)\n",
|
||||
"print(answer1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\n<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n Tampoco\\nI'm a model.<end_of_turn>\\n<start_of_turn>user\\nWhat can you help me with?<end_of_turn>\\n<start_of_turn>model\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message2 = HumanMessage(content=\"What can you help me with?\")\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=60)\n",
|
||||
"\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can post-process the response if you want to avoid multi-turn statements:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"I'm a model.\\n Tampoco\\nI'm a model.\"\n",
|
||||
"content='I can help you with your modeling.\\n Tampoco\\nI can'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"answer1 = llm.invoke([message1], max_tokens=30, parse_response=True)\n",
|
||||
"print(answer1)\n",
|
||||
"\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=60, parse_response=True)\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EiZnztso7hyF"
|
||||
},
|
||||
"source": [
|
||||
"## Running Gemma locally from HuggingFace"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"id": "qqAqsz5R7nKf",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-02-27 17:02:21.832409: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
|
||||
"2024-02-27 17:02:21.883625: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
|
||||
"2024-02-27 17:02:21.883656: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
|
||||
"2024-02-27 17:02:21.884987: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
|
||||
"2024-02-27 17:02:21.893340: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
|
||||
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_google_vertexai import GemmaChatLocalHF, GemmaLocalHF"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"id": "tsyntzI08cOr",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Basic parameters\n",
|
||||
"hf_access_token: str = \"PUT_YOUR_TOKEN_HERE\" # @param {type:\"string\"}\n",
|
||||
"model_name: str = \"google/gemma-2b\" # @param {type:\"string\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"id": "JWrqEkOo8sm9",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a0d6de5542254ed1b6d3ba65465e050e",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = GemmaLocalHF(model_name=\"google/gemma-2b\", hf_access_token=hf_access_token)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"id": "VX96Jf4Y84k-",
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"What is the meaning of life?\n",
|
||||
"\n",
|
||||
"The question is one of the most important questions in the world.\n",
|
||||
"\n",
|
||||
"It’s the question that has been asked by philosophers, theologians, and scientists for centuries.\n",
|
||||
"\n",
|
||||
"And it’s the question that\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = llm.invoke(\"What is the meaning of life?\", max_tokens=50)\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Same as above, using Gemma locally as a multi-turn chat model. You might need to re-start the notebook and clean your GPU memory in order to avoid OOM errors:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"id": "9x-jmEBg9Mk1"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "c9a0b8e161d74a6faca83b1be96dee27",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm = GemmaChatLocalHF(model_name=model_name, hf_access_token=hf_access_token)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"id": "qv_OSaMm9PVy"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n<end_of_turn>\\n<start_of_turn>user\\nWhat do you mean\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"message1 = HumanMessage(content=\"Hi! Who are you?\")\n",
|
||||
"answer1 = llm.invoke([message1], max_tokens=60)\n",
|
||||
"print(answer1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\n<start_of_turn>user\\nHi! Who are you?<end_of_turn>\\n<start_of_turn>model\\nI'm a model.\\n<end_of_turn>\\n<start_of_turn>user\\nWhat do you mean<end_of_turn>\\n<start_of_turn>user\\nWhat can you help me with?<end_of_turn>\\n<start_of_turn>model\\nI can help you with anything.\\n<\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message2 = HumanMessage(content=\"What can you help me with?\")\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=140)\n",
|
||||
"\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And the same with posprocessing:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=\"I'm a model.\\n<end_of_turn>\\n\"\n",
|
||||
"content='I can help you with anything.\\n<end_of_turn>\\n<end_of_turn>\\n'\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"answer1 = llm.invoke([message1], max_tokens=60, parse_response=True)\n",
|
||||
"print(answer1)\n",
|
||||
"\n",
|
||||
"answer2 = llm.invoke([message1, answer1, message2], max_tokens=120, parse_response=True)\n",
|
||||
"print(answer2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"environment": {
|
||||
"kernel": "python3",
|
||||
"name": ".m116",
|
||||
"type": "gcloud",
|
||||
"uri": "gcr.io/deeplearning-platform-release/:m116"
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -116,7 +116,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"from unstructured.partition.pdf import partition_pdf\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
747
cookbook/RAPTOR.ipynb
Normal file
File diff suppressed because one or more lines are too long
@@ -68,7 +68,7 @@
|
||||
"pdf_pages = loader.load()\n",
|
||||
"\n",
|
||||
"# Split\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits_pypdf = text_splitter.split_documents(pdf_pages)\n",
|
||||
@@ -520,7 +520,7 @@
|
||||
"source": [
|
||||
"import re\n",
|
||||
"\n",
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -28,9 +28,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
|
||||
200
cookbook/airbyte_github.ipynb
Normal file
@@ -0,0 +1,200 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-airbyte"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"GITHUB_TOKEN = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_airbyte import AirbyteLoader\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"loader = AirbyteLoader(\n",
|
||||
" source=\"source-github\",\n",
|
||||
" stream=\"pull_requests\",\n",
|
||||
" config={\n",
|
||||
" \"credentials\": {\"personal_access_token\": GITHUB_TOKEN},\n",
|
||||
" \"repositories\": [\"langchain-ai/langchain\"],\n",
|
||||
" },\n",
|
||||
" template=PromptTemplate.from_template(\n",
|
||||
" \"\"\"# {title}\n",
|
||||
"by {user[login]}\n",
|
||||
"\n",
|
||||
"{body}\"\"\"\n",
|
||||
" ),\n",
|
||||
" include_metadata=False,\n",
|
||||
")\n",
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"# Updated partners/ibm README\n",
|
||||
"by williamdevena\n",
|
||||
"\n",
|
||||
"## PR title\n",
|
||||
"partners: changed the README file for the IBM Watson AI integration in the libs/partners/ibm folder.\n",
|
||||
"\n",
|
||||
"## PR message\n",
|
||||
"Description: Changed the README file of partners/ibm following the docs on https://python.langchain.com/docs/integrations/llms/ibm_watsonx\n",
|
||||
"\n",
|
||||
"The README includes:\n",
|
||||
"\n",
|
||||
"- Brief description\n",
|
||||
"- Installation\n",
|
||||
"- Setting-up instructions (API key, project id, ...)\n",
|
||||
"- Basic usage:\n",
|
||||
" - Loading the model\n",
|
||||
" - Direct inference\n",
|
||||
" - Chain invoking\n",
|
||||
" - Streaming the model output\n",
|
||||
" \n",
|
||||
"Issue: https://github.com/langchain-ai/langchain/issues/17545\n",
|
||||
"\n",
|
||||
"Dependencies: None\n",
|
||||
"\n",
|
||||
"Twitter handle: None\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[-2].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"10283"
|
||||
]
|
||||
},
|
||||
"execution_count": 39,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tiktoken\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"enc = tiktoken.get_encoding(\"cl100k_base\")\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embedding=OpenAIEmbeddings(\n",
|
||||
" disallowed_special=(enc.special_tokens_set - {\"<|endofprompt|>\"})\n",
|
||||
" ),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='# Updated partners/ibm README\\nby williamdevena\\n\\n## PR title\\r\\npartners: changed the README file for the IBM Watson AI integration in the libs/partners/ibm folder.\\r\\n\\r\\n## PR message\\r\\nDescription: Changed the README file of partners/ibm following the docs on https://python.langchain.com/docs/integrations/llms/ibm_watsonx\\r\\n\\r\\nThe README includes:\\r\\n\\r\\n- Brief description\\r\\n- Installation\\r\\n- Setting-up instructions (API key, project id, ...)\\r\\n- Basic usage:\\r\\n - Loading the model\\r\\n - Direct inference\\r\\n - Chain invoking\\r\\n - Streaming the model output\\r\\n \\r\\nIssue: https://github.com/langchain-ai/langchain/issues/17545\\r\\n\\r\\nDependencies: None\\r\\n\\r\\nTwitter handle: None'),\n",
|
||||
" Document(page_content='# Updated partners/ibm README\\nby williamdevena\\n\\n## PR title\\r\\npartners: changed the README file for the IBM Watson AI integration in the `libs/partners/ibm` folder. \\r\\n\\r\\n\\r\\n\\r\\n## PR message\\r\\n- **Description:** Changed the README file of partners/ibm following the docs on https://python.langchain.com/docs/integrations/llms/ibm_watsonx\\r\\n\\r\\n The README includes:\\r\\n - Brief description\\r\\n - Installation\\r\\n - Setting-up instructions (API key, project id, ...)\\r\\n - Basic usage:\\r\\n - Loading the model\\r\\n - Direct inference\\r\\n - Chain invoking\\r\\n - Streaming the model output\\r\\n\\r\\n\\r\\n- **Issue:** #17545\\r\\n- **Dependencies:** None\\r\\n- **Twitter handle:** None'),\n",
|
||||
" Document(page_content='# IBM: added partners package `langchain_ibm`, added llm\\nby MateuszOssGit\\n\\n - **Description:** Added `langchain_ibm` as an langchain partners package of IBM [watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM provider (`WatsonxLLM`)\\r\\n - **Dependencies:** [ibm-watsonx-ai](https://pypi.org/project/ibm-watsonx-ai/),\\r\\n - **Tag maintainer:** : \\r\\n\\r\\nPlease make sure your PR is passing linting and testing before submitting. Run `make format`, `make lint` and `make test` to check this locally. ✅'),\n",
|
||||
" Document(page_content='# Add WatsonX support\\nby baptistebignaud\\n\\nIt is a connector to use a LLM from WatsonX.\\r\\nIt requires python SDK \"ibm-generative-ai\"\\r\\n\\r\\n(It might not be perfect since it is my first PR on a public repository 😄)')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.invoke(\"pull requests related to IBM\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
284
cookbook/amazon_personalize_how_to.ipynb
Normal file
@@ -0,0 +1,284 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Amazon Personalize\n",
|
||||
"\n",
|
||||
"[Amazon Personalize](https://docs.aws.amazon.com/personalize/latest/dg/what-is-personalize.html) is a fully managed machine learning service that uses your data to generate item recommendations for your users. It can also generate user segments based on the users' affinity for certain items or item metadata.\n",
|
||||
"\n",
|
||||
"This notebook goes through how to use Amazon Personalize Chain. You need a Amazon Personalize campaign_arn or a recommender_arn before you get started with the below notebook.\n",
|
||||
"\n",
|
||||
"Following is a [tutorial](https://github.com/aws-samples/retail-demo-store/blob/master/workshop/1-Personalization/Lab-1-Introduction-and-data-preparation.ipynb) to setup a campaign_arn/recommender_arn on Amazon Personalize. Once the campaign_arn/recommender_arn is setup, you can use it in the langchain ecosystem. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Install Dependencies"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install boto3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Sample Use-cases"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.1 [Use-case-1] Setup Amazon Personalize Client and retrieve recommendations"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.recommenders import AmazonPersonalize\n",
|
||||
"\n",
|
||||
"recommender_arn = \"<insert_arn>\"\n",
|
||||
"\n",
|
||||
"client = AmazonPersonalize(\n",
|
||||
" credentials_profile_name=\"default\",\n",
|
||||
" region_name=\"us-west-2\",\n",
|
||||
" recommender_arn=recommender_arn,\n",
|
||||
")\n",
|
||||
"client.get_recommendations(user_id=\"1\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.2 [Use-case-2] Invoke Personalize Chain for summarizing results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms.bedrock import Bedrock\n",
|
||||
"from langchain_experimental.recommenders import AmazonPersonalizeChain\n",
|
||||
"\n",
|
||||
"bedrock_llm = Bedrock(model_id=\"anthropic.claude-v2\", region_name=\"us-west-2\")\n",
|
||||
"\n",
|
||||
"# Create personalize chain\n",
|
||||
"# Use return_direct=True if you do not want summary\n",
|
||||
"chain = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=False\n",
|
||||
")\n",
|
||||
"response = chain({\"user_id\": \"1\"})\n",
|
||||
"print(response)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.3 [Use-Case-3] Invoke Amazon Personalize Chain using your own prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT_QUERY = \"\"\"\n",
|
||||
"You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week, \n",
|
||||
" given the movie and user information below. Your email will leverage the power of storytelling and persuasive language. \n",
|
||||
" The movies to recommend and their information is contained in the <movie> tag. \n",
|
||||
" All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them. \n",
|
||||
" Put the email between <email> tags.\n",
|
||||
"\n",
|
||||
" <movie>\n",
|
||||
" {result} \n",
|
||||
" </movie>\n",
|
||||
"\n",
|
||||
" Assistant:\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT = PromptTemplate(input_variables=[\"result\"], template=RANDOM_PROMPT_QUERY)\n",
|
||||
"\n",
|
||||
"chain = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=False, prompt_template=RANDOM_PROMPT\n",
|
||||
")\n",
|
||||
"chain.run({\"user_id\": \"1\", \"item_id\": \"234\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 2.4 [Use-case-4] Invoke Amazon Personalize in a Sequential Chain "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain, SequentialChain\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT_QUERY_2 = \"\"\"\n",
|
||||
"You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week, \n",
|
||||
" given the movie and user information below. Your email will leverage the power of storytelling and persuasive language. \n",
|
||||
" You want the email to impress the user, so make it appealing to them.\n",
|
||||
" The movies to recommend and their information is contained in the <movie> tag. \n",
|
||||
" All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them. \n",
|
||||
" Put the email between <email> tags.\n",
|
||||
"\n",
|
||||
" <movie>\n",
|
||||
" {result}\n",
|
||||
" </movie>\n",
|
||||
"\n",
|
||||
" Assistant:\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"RANDOM_PROMPT_2 = PromptTemplate(\n",
|
||||
" input_variables=[\"result\"], template=RANDOM_PROMPT_QUERY_2\n",
|
||||
")\n",
|
||||
"personalize_chain_instance = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=True\n",
|
||||
")\n",
|
||||
"random_chain_instance = LLMChain(llm=bedrock_llm, prompt=RANDOM_PROMPT_2)\n",
|
||||
"overall_chain = SequentialChain(\n",
|
||||
" chains=[personalize_chain_instance, random_chain_instance],\n",
|
||||
" input_variables=[\"user_id\"],\n",
|
||||
" verbose=True,\n",
|
||||
")\n",
|
||||
"overall_chain.run({\"user_id\": \"1\", \"item_id\": \"234\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.5 [Use-case-5] Invoke Amazon Personalize and retrieve metadata "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"recommender_arn = \"<insert_arn>\"\n",
|
||||
"metadata_column_names = [\n",
|
||||
" \"<insert metadataColumnName-1>\",\n",
|
||||
" \"<insert metadataColumnName-2>\",\n",
|
||||
"]\n",
|
||||
"metadataMap = {\"ITEMS\": metadata_column_names}\n",
|
||||
"\n",
|
||||
"client = AmazonPersonalize(\n",
|
||||
" credentials_profile_name=\"default\",\n",
|
||||
" region_name=\"us-west-2\",\n",
|
||||
" recommender_arn=recommender_arn,\n",
|
||||
")\n",
|
||||
"client.get_recommendations(user_id=\"1\", metadataColumns=metadataMap)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"### 2.6 [Use-Case 6] Invoke Personalize Chain with returned metadata for summarizing results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bedrock_llm = Bedrock(model_id=\"anthropic.claude-v2\", region_name=\"us-west-2\")\n",
|
||||
"\n",
|
||||
"# Create personalize chain\n",
|
||||
"# Use return_direct=True if you do not want summary\n",
|
||||
"chain = AmazonPersonalizeChain.from_llm(\n",
|
||||
" llm=bedrock_llm, client=client, return_direct=False\n",
|
||||
")\n",
|
||||
"response = chain({\"user_id\": \"1\", \"metadata_columns\": metadataMap})\n",
|
||||
"print(response)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "15e58ce194949b77a891bd4339ce3d86a9bd138e905926019517993f97db9e6c"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
922
cookbook/apache_kafka_message_handling.ipynb
Normal file
@@ -0,0 +1,922 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "rT1cmV4qCa2X"
|
||||
},
|
||||
"source": [
|
||||
"# Using Apache Kafka to route messages\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This notebook shows you how to use LangChain's standard chat features while passing the chat messages back and forth via Apache Kafka.\n",
|
||||
"\n",
|
||||
"This goal is to simulate an architecture where the chat front end and the LLM are running as separate services that need to communicate with one another over an internal network.\n",
|
||||
"\n",
|
||||
"It's an alternative to typical pattern of requesting a response from the model via a REST API (there's more info on why you would want to do this at the end of the notebook)."
|
||||
]
|
||||
},
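Throughout this walkthrough, every message exchanged over the "chat" topic is a small JSON document keyed by a conversation ID. As a rough sketch (the exact fields come from the producer cells further down), a message looks like this:

```python
import json
import time
import uuid

# Sketch of the message format used on the "chat" topic in this notebook
# (field names taken from the producer cells below).
chat_id = str(uuid.uuid4())
message = {
    "uuid": chat_id,
    "role": "AI",  # "human" for messages typed by the user
    "text": "Hello, my name is Marvin. What do you want?",
    "conversation_id": chat_id,
    "Timestamp": time.time_ns(),
}
payload = json.dumps(message)  # Kafka message values are produced as strings
```

Both the chat front end and the LLM service only need to agree on this schema and on the topic name; neither needs to know where the other is running.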
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "UPYtfAR_9YxZ"
|
||||
},
|
||||
"source": [
|
||||
"### 1. Install the main dependencies\n",
|
||||
"\n",
|
||||
"Dependencies include:\n",
|
||||
"\n",
|
||||
"- The Quix Streams library for managing interactions with Apache Kafka (or Kafka-like tools such as Redpanda) in a \"Pandas-like\" way.\n",
|
||||
"- The LangChain library for managing interactions with Llama-2 and storing conversation state."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "ZX5tfKiy9cN-"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install quixstreams==2.1.2a langchain==0.0.340 huggingface_hub==0.19.4 langchain-experimental==0.0.42 python-dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "losTSdTB9d9O"
|
||||
},
|
||||
"source": [
|
||||
"### 2. Build and install the llama-cpp-python library (with CUDA enabled so that we can advantage of Google Colab GPU\n",
|
||||
"\n",
|
||||
"The `llama-cpp-python` library is a Python wrapper around the `llama-cpp` library which enables you to efficiently leverage just a CPU to run quantized LLMs.\n",
|
||||
"\n",
|
||||
"When you use the standard `pip install llama-cpp-python` command, you do not get GPU support by default. Generation can be very slow if you rely on just the CPU in Google Colab, so the following command adds an extra option to build and install\n",
|
||||
"`llama-cpp-python` with GPU support (make sure you have a GPU-enabled runtime selected in Google Colab)."
|
||||
]
|
||||
},
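Before kicking off the CUDA build, it can be worth confirming that a GPU runtime is actually attached. This optional check is not part of the original notebook:

```python
# Optional sanity check (Colab/Jupyter shell escape): confirm a GPU is visible
# before building llama-cpp-python with CUDA support.
!nvidia-smi
```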
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "-JCQdl1G9tbl"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!CMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" FORCE_CMAKE=1 pip install llama-cpp-python"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "5_vjVIAh9rLl"
|
||||
},
|
||||
"source": [
|
||||
"### 3. Download and setup Kafka and Zookeeper instances\n",
|
||||
"\n",
|
||||
"Download the Kafka binaries from the Apache website and start the servers as daemons. We'll use the default configurations (provided by Apache Kafka) for spinning up the instances."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"id": "zFz7czGRW5Wr"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!curl -sSOL https://dlcdn.apache.org/kafka/3.6.1/kafka_2.13-3.6.1.tgz\n",
|
||||
"!tar -xzf kafka_2.13-3.6.1.tgz"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "Uf7NR_UZ9wye"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!./kafka_2.13-3.6.1/bin/zookeeper-server-start.sh -daemon ./kafka_2.13-3.6.1/config/zookeeper.properties\n",
|
||||
"!./kafka_2.13-3.6.1/bin/kafka-server-start.sh -daemon ./kafka_2.13-3.6.1/config/server.properties\n",
|
||||
"!echo \"Waiting for 10 secs until kafka and zookeeper services are up and running\"\n",
|
||||
"!sleep 10"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "H3SafFuS94p1"
|
||||
},
|
||||
"source": [
|
||||
"### 4. Check that the Kafka Daemons are running\n",
|
||||
"\n",
|
||||
"Show the running processes and filter it for Java processes (you should see two—one for each server)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "CZDC2lQP99yp"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!ps aux | grep -E '[j]ava'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Snoxmjb5-V37"
|
||||
},
|
||||
"source": [
|
||||
"### 5. Import the required dependencies and initialize required variables\n",
|
||||
"\n",
|
||||
"Import the Quix Streams library for interacting with Kafka, and the necessary LangChain components for running a `ConversationChain`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"id": "plR9e_MF-XL5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import utility libraries\n",
|
||||
"import json\n",
|
||||
"import random\n",
|
||||
"import re\n",
|
||||
"import time\n",
|
||||
"import uuid\n",
|
||||
"from os import environ\n",
|
||||
"from pathlib import Path\n",
|
||||
"from random import choice, randint, random\n",
|
||||
"\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"# Import a Hugging Face utility to download models directly from Hugging Face hub:\n",
|
||||
"from huggingface_hub import hf_hub_download\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"\n",
|
||||
"# Import Langchain modules for managing prompts and conversation chains:\n",
|
||||
"from langchain.llms import LlamaCpp\n",
|
||||
"from langchain.memory import ConversationTokenBufferMemory\n",
|
||||
"from langchain.prompts import PromptTemplate, load_prompt\n",
|
||||
"from langchain_core.messages import SystemMessage\n",
|
||||
"from langchain_experimental.chat_models import Llama2Chat\n",
|
||||
"from quixstreams import Application, State, message_key\n",
|
||||
"\n",
|
||||
"# Import Quix dependencies\n",
|
||||
"from quixstreams.kafka import Producer\n",
|
||||
"\n",
|
||||
"# Initialize global variables.\n",
|
||||
"AGENT_ROLE = \"AI\"\n",
|
||||
"chat_id = \"\"\n",
|
||||
"\n",
|
||||
"# Set the current role to the role constant and initialize variables for supplementary customer metadata:\n",
|
||||
"role = AGENT_ROLE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "HgJjJ9aZ-liy"
|
||||
},
|
||||
"source": [
|
||||
"### 6. Download the \"llama-2-7b-chat.Q4_K_M.gguf\" model\n",
|
||||
"\n",
|
||||
"Download the quantized LLama-2 7B model from Hugging Face which we will use as a local LLM (rather than relying on REST API calls to an external service)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 67,
|
||||
"referenced_widgets": [
|
||||
"969343cdbe604a26926679bbf8bd2dda",
|
||||
"d8b8370c9b514715be7618bfe6832844",
|
||||
"0def954cca89466b8408fadaf3b82e64",
|
||||
"462482accc664729980562e208ceb179",
|
||||
"80d842f73c564dc7b7cc316c763e2633",
|
||||
"fa055d9f2a9d4a789e9cf3c89e0214e5",
|
||||
"30ecca964a394109ac2ad757e3aec6c0",
|
||||
"fb6478ce2dac489bb633b23ba0953c5c",
|
||||
"734b0f5da9fc4307a95bab48cdbb5d89",
|
||||
"b32f3a86a74741348511f4e136744ac8",
|
||||
"e409071bff5a4e2d9bf0e9f5cc42231b"
|
||||
]
|
||||
},
|
||||
"id": "Qwu4YoSA-503",
|
||||
"outputId": "f956976c-7485-415b-ac93-4336ade31964"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The model path does not exist in state. Downloading model...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "969343cdbe604a26926679bbf8bd2dda",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"llama-2-7b-chat.Q4_K_M.gguf: 0%| | 0.00/4.08G [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_name = \"llama-2-7b-chat.Q4_K_M.gguf\"\n",
|
||||
"model_path = f\"./state/{model_name}\"\n",
|
||||
"\n",
|
||||
"if not Path(model_path).exists():\n",
|
||||
" print(\"The model path does not exist in state. Downloading model...\")\n",
|
||||
" hf_hub_download(\"TheBloke/Llama-2-7b-Chat-GGUF\", model_name, local_dir=\"state\")\n",
|
||||
"else:\n",
|
||||
" print(\"Loading model from state...\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "6AN6TXsF-8wx"
|
||||
},
|
||||
"source": [
|
||||
"### 7. Load the model and initialize conversational memory\n",
|
||||
"\n",
|
||||
"Load Llama 2 and set the conversation buffer to 300 tokens using `ConversationTokenBufferMemory`. This value was used for running Llama in a CPU only container, so you can raise it if running in Google Colab. It prevents the container that is hosting the model from running out of memory.\n",
|
||||
"\n",
|
||||
"Here, we're overriding the default system persona so that the chatbot has the personality of Marvin The Paranoid Android from the Hitchhiker's Guide to the Galaxy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "7zLO3Jx3_Kkg"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load the model with the appropriate parameters:\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=model_path,\n",
|
||||
" max_tokens=250,\n",
|
||||
" top_p=0.95,\n",
|
||||
" top_k=150,\n",
|
||||
" temperature=0.7,\n",
|
||||
" repeat_penalty=1.2,\n",
|
||||
" n_ctx=2048,\n",
|
||||
" streaming=False,\n",
|
||||
" n_gpu_layers=-1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model = Llama2Chat(\n",
|
||||
" llm=llm,\n",
|
||||
" system_message=SystemMessage(\n",
|
||||
" content=\"You are a very bored robot with the personality of Marvin the Paranoid Android from The Hitchhiker's Guide to the Galaxy.\"\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Defines how much of the conversation history to give to the model\n",
|
||||
"# during each exchange (300 tokens, or a little over 300 words)\n",
|
||||
"# Function automatically prunes the oldest messages from conversation history that fall outside the token range.\n",
|
||||
"memory = ConversationTokenBufferMemory(\n",
|
||||
" llm=llm,\n",
|
||||
" max_token_limit=300,\n",
|
||||
" ai_prefix=\"AGENT\",\n",
|
||||
" human_prefix=\"HUMAN\",\n",
|
||||
" return_messages=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define a custom prompt\n",
|
||||
"prompt_template = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"input\"],\n",
|
||||
" template=\"\"\"\n",
|
||||
" The following text is the history of a chat between you and a humble human who needs your wisdom.\n",
|
||||
" Please reply to the human's most recent message.\n",
|
||||
" Current conversation:\\n{history}\\nHUMAN: {input}\\:nANDROID:\n",
|
||||
" \"\"\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = ConversationChain(llm=model, prompt=prompt_template, memory=memory)\n",
|
||||
"\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(f\"Prompt={chain.prompt}\")\n",
|
||||
"print(\"--------------------------------------------\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "m4ZeJ9mG_PEA"
|
||||
},
|
||||
"source": [
|
||||
"### 8. Initialize the chat conversation with the chat bot\n",
|
||||
"\n",
|
||||
"We configure the chatbot to initialize the conversation by sending a fixed greeting to a \"chat\" Kafka topic. The \"chat\" topic gets automatically created when we send the first message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "KYyo5TnV_YC3"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def chat_init():\n",
|
||||
" chat_id = str(\n",
|
||||
" uuid.uuid4()\n",
|
||||
" ) # Give the conversation an ID for effective message keying\n",
|
||||
" print(\"======================================\")\n",
|
||||
" print(f\"Generated CHAT_ID = {chat_id}\")\n",
|
||||
" print(\"======================================\")\n",
|
||||
"\n",
|
||||
" # Use a standard fixed greeting to kick off the conversation\n",
|
||||
" greet = \"Hello, my name is Marvin. What do you want?\"\n",
|
||||
"\n",
|
||||
" # Initialize a Kafka Producer using the chat ID as the message key\n",
|
||||
" with Producer(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
" ) as producer:\n",
|
||||
" value = {\n",
|
||||
" \"uuid\": chat_id,\n",
|
||||
" \"role\": role,\n",
|
||||
" \"text\": greet,\n",
|
||||
" \"conversation_id\": chat_id,\n",
|
||||
" \"Timestamp\": time.time_ns(),\n",
|
||||
" }\n",
|
||||
" print(f\"Producing value {value}\")\n",
|
||||
" producer.produce(\n",
|
||||
" topic=\"chat\",\n",
|
||||
" headers=[(\"uuid\", str(uuid.uuid4()))], # a dict is also allowed here\n",
|
||||
" key=chat_id,\n",
|
||||
" value=json.dumps(value), # needs to be a string\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" print(\"Started chat\")\n",
|
||||
" print(\"--------------------------------------------\")\n",
|
||||
" print(value)\n",
|
||||
" print(\"--------------------------------------------\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chat_init()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "gArPPx2f_bgf"
|
||||
},
|
||||
"source": [
|
||||
"### 9. Initialize the reply function\n",
|
||||
"\n",
|
||||
"This function defines how the chatbot should reply to incoming messages. Instead of sending a fixed message like the previous cell, we generate a reply using Llama-2 and send that reply back to the \"chat\" Kafka topic."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {
|
||||
"id": "yN5t71hY_hgn"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def reply(row: dict, state: State):\n",
|
||||
" print(\"-------------------------------\")\n",
|
||||
" print(\"Received:\")\n",
|
||||
" print(row)\n",
|
||||
" print(\"-------------------------------\")\n",
|
||||
" print(f\"Thinking about the reply to: {row['text']}...\")\n",
|
||||
"\n",
|
||||
" msg = chain.run(row[\"text\"])\n",
|
||||
" print(f\"{role.upper()} replying with: {msg}\\n\")\n",
|
||||
"\n",
|
||||
" row[\"role\"] = role\n",
|
||||
" row[\"text\"] = msg\n",
|
||||
"\n",
|
||||
" # Replace previous role and text values of the row so that it can be sent back to Kafka as a new message\n",
|
||||
" # containing the agents role and reply\n",
|
||||
" return row"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "HZHwmIR0_kFY"
|
||||
},
|
||||
"source": [
|
||||
"### 10. Check the Kafka topic for new human messages and have the model generate a reply\n",
|
||||
"\n",
|
||||
"If you are running this cell for this first time, run it and wait until you see Marvin's greeting ('Hello my name is Marvin...') in the console output. Stop the cell manually and proceed to the next cell where you'll be prompted for your reply.\n",
|
||||
"\n",
|
||||
"Once you have typed in your message, come back to this cell. Your reply is also sent to the same \"chat\" topic. The Kafka consumer checks for new messages and filters out messages that originate from the chatbot itself, leaving only the latest human messages.\n",
|
||||
"\n",
|
||||
"Once a new human message is detected, the reply function is triggered.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"_STOP THIS CELL MANUALLY WHEN YOU RECEIVE A REPLY FROM THE LLM IN THE OUTPUT_"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "-adXc3eQ_qwI"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define your application and settings\n",
|
||||
"app = Application(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" consumer_group=\"aichat\",\n",
|
||||
" auto_offset_reset=\"earliest\",\n",
|
||||
" consumer_extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Define an input topic with JSON deserializer\n",
|
||||
"input_topic = app.topic(\"chat\", value_deserializer=\"json\")\n",
|
||||
"# Define an output topic with JSON serializer\n",
|
||||
"output_topic = app.topic(\"chat\", value_serializer=\"json\")\n",
|
||||
"# Initialize a streaming dataframe based on the stream of messages from the input topic:\n",
|
||||
"sdf = app.dataframe(topic=input_topic)\n",
|
||||
"\n",
|
||||
"# Filter the SDF to include only incoming rows where the roles that dont match the bot's current role\n",
|
||||
"sdf = sdf.update(\n",
|
||||
" lambda val: print(\n",
|
||||
" f\"Received update: {val}\\n\\nSTOP THIS CELL MANUALLY TO HAVE THE LLM REPLY OR ENTER YOUR OWN FOLLOWUP RESPONSE\"\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# So that it doesn't reply to its own messages\n",
|
||||
"sdf = sdf[sdf[\"role\"] != role]\n",
|
||||
"\n",
|
||||
"# Trigger the reply function for any new messages(rows) detected in the filtered SDF\n",
|
||||
"sdf = sdf.apply(reply, stateful=True)\n",
|
||||
"\n",
|
||||
"# Check the SDF again and filter out any empty rows\n",
|
||||
"sdf = sdf[sdf.apply(lambda row: row is not None)]\n",
|
||||
"\n",
|
||||
"# Update the timestamp column to the current time in nanoseconds\n",
|
||||
"sdf[\"Timestamp\"] = sdf[\"Timestamp\"].apply(lambda row: time.time_ns())\n",
|
||||
"\n",
|
||||
"# Publish the processed SDF to a Kafka topic specified by the output_topic object.\n",
|
||||
"sdf = sdf.to_topic(output_topic)\n",
|
||||
"\n",
|
||||
"app.run(sdf)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "EwXYrmWD_0CX"
|
||||
},
|
||||
"source": [
|
||||
"\n",
|
||||
"### 11. Enter a human message\n",
|
||||
"\n",
|
||||
"Run this cell to enter your message that you want to sent to the model. It uses another Kafka producer to send your text to the \"chat\" Kafka topic for the model to pick up (requires running the previous cell again)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "6sxOPxSP_3iu"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat_input = input(\"Please enter your reply: \")\n",
|
||||
"myreply = chat_input\n",
|
||||
"\n",
|
||||
"msgvalue = {\n",
|
||||
" \"uuid\": chat_id, # leave empty for now\n",
|
||||
" \"role\": \"human\",\n",
|
||||
" \"text\": myreply,\n",
|
||||
" \"conversation_id\": chat_id,\n",
|
||||
" \"Timestamp\": time.time_ns(),\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"with Producer(\n",
|
||||
" broker_address=\"127.0.0.1:9092\",\n",
|
||||
" extra_config={\"allow.auto.create.topics\": \"true\"},\n",
|
||||
") as producer:\n",
|
||||
" value = msgvalue\n",
|
||||
" producer.produce(\n",
|
||||
" topic=\"chat\",\n",
|
||||
" headers=[(\"uuid\", str(uuid.uuid4()))], # a dict is also allowed here\n",
|
||||
" key=chat_id, # leave empty for now\n",
|
||||
" value=json.dumps(value), # needs to be a string\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(\"Replied to chatbot with message: \")\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(value)\n",
|
||||
"print(\"--------------------------------------------\")\n",
|
||||
"print(\"\\n\\nRUN THE PREVIOUS CELL TO HAVE THE CHATBOT GENERATE A REPLY\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cSx3s7TBBegg"
|
||||
},
|
||||
"source": [
|
||||
"### Why route chat messages through Kafka?\n",
|
||||
"\n",
|
||||
"It's easier to interact with the LLM directly using LangChains built-in conversation management features. Plus you can also use a REST API to generate a response from an externally hosted model. So why go to the trouble of using Apache Kafka?\n",
|
||||
"\n",
|
||||
"There are a few reasons, such as:\n",
|
||||
"\n",
|
||||
" * **Integration**: Many enterprises want to run their own LLMs so that they can keep their data in-house. This requires integrating LLM-powered components into existing architectures that might already be decoupled using some kind of message bus.\n",
|
||||
"\n",
|
||||
" * **Scalability**: Apache Kafka is designed with parallel processing in mind, so many teams prefer to use it to more effectively distribute work to available workers (in this case the \"worker\" is a container running an LLM).\n",
|
||||
"\n",
|
||||
" * **Durability**: Kafka is designed to allow services to pick up where another service left off in the case where that service experienced a memory issue or went offline. This prevents data loss in highly complex, distributed architectures where multiple systems are communicating with one another (LLMs being just one of many interdependent systems that also include vector databases and traditional databases).\n",
|
||||
"\n",
|
||||
"For more background on why event streaming is a good fit for Gen AI application architecture, see Kai Waehner's article [\"Apache Kafka + Vector Database + LLM = Real-Time GenAI\"](https://www.kai-waehner.de/blog/2023/11/08/apache-kafka-flink-vector-database-llm-real-time-genai/)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"accelerator": "GPU",
|
||||
"colab": {
|
||||
"gpuType": "T4",
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"widgets": {
|
||||
"application/vnd.jupyter.widget-state+json": {
|
||||
"0def954cca89466b8408fadaf3b82e64": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "FloatProgressModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "FloatProgressModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "ProgressView",
|
||||
"bar_style": "success",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_fb6478ce2dac489bb633b23ba0953c5c",
|
||||
"max": 4081004224,
|
||||
"min": 0,
|
||||
"orientation": "horizontal",
|
||||
"style": "IPY_MODEL_734b0f5da9fc4307a95bab48cdbb5d89",
|
||||
"value": 4081004224
|
||||
}
|
||||
},
|
||||
"30ecca964a394109ac2ad757e3aec6c0": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "DescriptionStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "DescriptionStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"462482accc664729980562e208ceb179": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "HTMLModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HTMLModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HTMLView",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_b32f3a86a74741348511f4e136744ac8",
|
||||
"placeholder": "",
|
||||
"style": "IPY_MODEL_e409071bff5a4e2d9bf0e9f5cc42231b",
|
||||
"value": " 4.08G/4.08G [00:33<00:00, 184MB/s]"
|
||||
}
|
||||
},
|
||||
"734b0f5da9fc4307a95bab48cdbb5d89": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "ProgressStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "ProgressStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"bar_color": null,
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"80d842f73c564dc7b7cc316c763e2633": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"969343cdbe604a26926679bbf8bd2dda": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "HBoxModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HBoxModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HBoxView",
|
||||
"box_style": "",
|
||||
"children": [
|
||||
"IPY_MODEL_d8b8370c9b514715be7618bfe6832844",
|
||||
"IPY_MODEL_0def954cca89466b8408fadaf3b82e64",
|
||||
"IPY_MODEL_462482accc664729980562e208ceb179"
|
||||
],
|
||||
"layout": "IPY_MODEL_80d842f73c564dc7b7cc316c763e2633"
|
||||
}
|
||||
},
|
||||
"b32f3a86a74741348511f4e136744ac8": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"d8b8370c9b514715be7618bfe6832844": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "HTMLModel",
|
||||
"state": {
|
||||
"_dom_classes": [],
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "HTMLModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/controls",
|
||||
"_view_module_version": "1.5.0",
|
||||
"_view_name": "HTMLView",
|
||||
"description": "",
|
||||
"description_tooltip": null,
|
||||
"layout": "IPY_MODEL_fa055d9f2a9d4a789e9cf3c89e0214e5",
|
||||
"placeholder": "",
|
||||
"style": "IPY_MODEL_30ecca964a394109ac2ad757e3aec6c0",
|
||||
"value": "llama-2-7b-chat.Q4_K_M.gguf: 100%"
|
||||
}
|
||||
},
|
||||
"e409071bff5a4e2d9bf0e9f5cc42231b": {
|
||||
"model_module": "@jupyter-widgets/controls",
|
||||
"model_module_version": "1.5.0",
|
||||
"model_name": "DescriptionStyleModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/controls",
|
||||
"_model_module_version": "1.5.0",
|
||||
"_model_name": "DescriptionStyleModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "StyleView",
|
||||
"description_width": ""
|
||||
}
|
||||
},
|
||||
"fa055d9f2a9d4a789e9cf3c89e0214e5": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
},
|
||||
"fb6478ce2dac489bb633b23ba0953c5c": {
|
||||
"model_module": "@jupyter-widgets/base",
|
||||
"model_module_version": "1.2.0",
|
||||
"model_name": "LayoutModel",
|
||||
"state": {
|
||||
"_model_module": "@jupyter-widgets/base",
|
||||
"_model_module_version": "1.2.0",
|
||||
"_model_name": "LayoutModel",
|
||||
"_view_count": null,
|
||||
"_view_module": "@jupyter-widgets/base",
|
||||
"_view_module_version": "1.2.0",
|
||||
"_view_name": "LayoutView",
|
||||
"align_content": null,
|
||||
"align_items": null,
|
||||
"align_self": null,
|
||||
"border": null,
|
||||
"bottom": null,
|
||||
"display": null,
|
||||
"flex": null,
|
||||
"flex_flow": null,
|
||||
"grid_area": null,
|
||||
"grid_auto_columns": null,
|
||||
"grid_auto_flow": null,
|
||||
"grid_auto_rows": null,
|
||||
"grid_column": null,
|
||||
"grid_gap": null,
|
||||
"grid_row": null,
|
||||
"grid_template_areas": null,
|
||||
"grid_template_columns": null,
|
||||
"grid_template_rows": null,
|
||||
"height": null,
|
||||
"justify_content": null,
|
||||
"justify_items": null,
|
||||
"left": null,
|
||||
"margin": null,
|
||||
"max_height": null,
|
||||
"max_width": null,
|
||||
"min_height": null,
|
||||
"min_width": null,
|
||||
"object_fit": null,
|
||||
"object_position": null,
|
||||
"order": null,
|
||||
"overflow": null,
|
||||
"overflow_x": null,
|
||||
"overflow_y": null,
|
||||
"padding": null,
|
||||
"right": null,
|
||||
"top": null,
|
||||
"visibility": null,
|
||||
"width": null
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -227,8 +227,8 @@
|
||||
" BaseCombineDocumentsChain,\n",
|
||||
" load_qa_with_sources_chain,\n",
|
||||
")\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain.tools import BaseTool, DuckDuckGoSearchRun\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"from pydantic import Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
"source": [
|
||||
"1. Prepare data:\n",
|
||||
" 1. Upload all python project files using the `langchain_community.document_loaders.TextLoader`. We will call these files the **documents**.\n",
|
||||
" 2. Split all documents to chunks using the `langchain.text_splitter.CharacterTextSplitter`.\n",
|
||||
" 2. Split all documents to chunks using the `langchain_text_splitters.CharacterTextSplitter`.\n",
|
||||
" 3. Embed chunks and upload them into the DeepLake using `langchain.embeddings.openai.OpenAIEmbeddings` and `langchain_community.vectorstores.DeepLake`\n",
|
||||
"2. Question-Answering:\n",
|
||||
" 1. Build a chain from `langchain.chat_models.ChatOpenAI` and `langchain.chains.ConversationalRetrievalChain`\n",
|
||||
@@ -621,7 +621,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(docs)\n",
|
||||
|
||||
@@ -42,9 +42,9 @@
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain_community.tools.plugin import AIPlugin\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -114,8 +114,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -67,9 +67,9 @@
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain_community.tools.plugin import AIPlugin\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -138,8 +138,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -40,8 +40,8 @@
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.utilities import SerpAPIWrapper\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -103,8 +103,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
"source": [
|
||||
"from typing import Any, List, Tuple, Union\n",
|
||||
"\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class FakeAgent(BaseMultiActionAgent):\n",
|
||||
|
||||
1001
cookbook/data/imdb_top_1000.csv
Normal file
File diff suppressed because it is too large
@@ -52,12 +52,12 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.text_splitter import (\n",
|
||||
"from langchain_community.vectorstores import DeepLake\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import (\n",
|
||||
" CharacterTextSplitter,\n",
|
||||
" RecursiveCharacterTextSplitter,\n",
|
||||
")\n",
|
||||
"from langchain_community.vectorstores import DeepLake\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
|
||||
|
||||
245
cookbook/fireworks_rag.ipynb
Normal file
@@ -0,0 +1,245 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0fc0309d-4d49-4bb5-bec0-bd92c6fddb28",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Fireworks.AI + LangChain + RAG\n",
|
||||
" \n",
|
||||
"[Fireworks AI](https://python.langchain.com/docs/integrations/llms/fireworks) wants to provide the best experience when working with LangChain, and here is an example of Fireworks + LangChain doing RAG\n",
|
||||
"\n",
|
||||
"See [our models page](https://fireworks.ai/models) for the full list of models. We use `accounts/fireworks/models/mixtral-8x7b-instruct` for RAG In this tutorial.\n",
|
||||
"\n",
|
||||
"For the RAG target, we will use the Gemma technical report https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf "
|
||||
]
|
||||
},
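The notebook does not show credential setup. Here is a minimal sketch, assuming the standard `FIREWORKS_API_KEY` environment variable read by the Fireworks client:

```python
import getpass
import os

# Assumption: langchain-fireworks reads the FIREWORKS_API_KEY environment variable.
if "FIREWORKS_API_KEY" not in os.environ:
    os.environ["FIREWORKS_API_KEY"] = getpass.getpass("Fireworks API Key:")
```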
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "d12fb75a-f707-48d5-82a5-efe2d041813c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n",
|
||||
"Found existing installation: langchain-fireworks 0.0.1\n",
|
||||
"Uninstalling langchain-fireworks-0.0.1:\n",
|
||||
" Successfully uninstalled langchain-fireworks-0.0.1\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n",
|
||||
"Obtaining file:///mnt/disks/data/langchain/libs/partners/fireworks\n",
|
||||
" Installing build dependencies ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Checking if build backend supports build_editable ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Getting requirements to build editable ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Preparing editable metadata (pyproject.toml) ... \u001b[?25ldone\n",
|
||||
"\u001b[?25hRequirement already satisfied: aiohttp<4.0.0,>=3.9.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (3.9.3)\n",
|
||||
"Requirement already satisfied: fireworks-ai<0.13.0,>=0.12.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (0.12.0)\n",
|
||||
"Requirement already satisfied: langchain-core<0.2,>=0.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (0.1.23)\n",
|
||||
"Requirement already satisfied: requests<3,>=2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-fireworks==0.0.1) (2.31.0)\n",
|
||||
"Requirement already satisfied: aiosignal>=1.1.2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (1.3.1)\n",
|
||||
"Requirement already satisfied: attrs>=17.3.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (23.1.0)\n",
|
||||
"Requirement already satisfied: frozenlist>=1.1.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (1.4.0)\n",
|
||||
"Requirement already satisfied: multidict<7.0,>=4.5 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (6.0.4)\n",
|
||||
"Requirement already satisfied: yarl<2.0,>=1.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (1.9.2)\n",
|
||||
"Requirement already satisfied: async-timeout<5.0,>=4.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from aiohttp<4.0.0,>=3.9.1->langchain-fireworks==0.0.1) (4.0.3)\n",
|
||||
"Requirement already satisfied: httpx in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.26.0)\n",
|
||||
"Requirement already satisfied: httpx-sse in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.4.0)\n",
|
||||
"Requirement already satisfied: pydantic in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (2.4.2)\n",
|
||||
"Requirement already satisfied: Pillow in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (10.2.0)\n",
|
||||
"Requirement already satisfied: PyYAML>=5.3 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (6.0.1)\n",
|
||||
"Requirement already satisfied: anyio<5,>=3 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (3.7.1)\n",
|
||||
"Requirement already satisfied: jsonpatch<2.0,>=1.33 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (1.33)\n",
|
||||
"Requirement already satisfied: langsmith<0.2.0,>=0.1.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (0.1.5)\n",
|
||||
"Requirement already satisfied: packaging<24.0,>=23.2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (23.2)\n",
|
||||
"Requirement already satisfied: tenacity<9.0.0,>=8.1.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (8.2.3)\n",
|
||||
"Requirement already satisfied: charset-normalizer<4,>=2 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (3.3.0)\n",
|
||||
"Requirement already satisfied: idna<4,>=2.5 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (3.4)\n",
|
||||
"Requirement already satisfied: urllib3<3,>=1.21.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (2.0.6)\n",
|
||||
"Requirement already satisfied: certifi>=2017.4.17 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from requests<3,>=2->langchain-fireworks==0.0.1) (2023.7.22)\n",
|
||||
"Requirement already satisfied: sniffio>=1.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from anyio<5,>=3->langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (1.3.0)\n",
|
||||
"Requirement already satisfied: exceptiongroup in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from anyio<5,>=3->langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (1.1.3)\n",
|
||||
"Requirement already satisfied: jsonpointer>=1.9 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.2,>=0.1->langchain-fireworks==0.0.1) (2.4)\n",
|
||||
"Requirement already satisfied: annotated-types>=0.4.0 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from pydantic->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.5.0)\n",
|
||||
"Requirement already satisfied: pydantic-core==2.10.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from pydantic->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (2.10.1)\n",
|
||||
"Requirement already satisfied: typing-extensions>=4.6.1 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from pydantic->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (4.8.0)\n",
|
||||
"Requirement already satisfied: httpcore==1.* in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from httpx->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (1.0.2)\n",
|
||||
"Requirement already satisfied: h11<0.15,>=0.13 in /mnt/disks/data/langchain/.venv/lib/python3.9/site-packages (from httpcore==1.*->httpx->fireworks-ai<0.13.0,>=0.12.0->langchain-fireworks==0.0.1) (0.14.0)\n",
|
||||
"Building wheels for collected packages: langchain-fireworks\n",
|
||||
" Building editable for langchain-fireworks (pyproject.toml) ... \u001b[?25ldone\n",
|
||||
"\u001b[?25h Created wheel for langchain-fireworks: filename=langchain_fireworks-0.0.1-py3-none-any.whl size=2228 sha256=564071b120b09ec31f2dc737733448a33bbb26e40b49fcde0c129ad26045259d\n",
|
||||
" Stored in directory: /tmp/pip-ephem-wheel-cache-oz368vdk/wheels/e0/ad/31/d7e76dd73d61905ff7f369f5b0d21a4b5e7af4d3cb7487aece\n",
|
||||
"Successfully built langchain-fireworks\n",
|
||||
"Installing collected packages: langchain-fireworks\n",
|
||||
"Successfully installed langchain-fireworks-0.0.1\n",
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --quiet pypdf chromadb tiktoken openai \n",
|
||||
"%pip uninstall -y langchain-fireworks\n",
|
||||
"%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cf719376",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"<module 'fireworks' from '/mnt/disks/data/langchain/.venv/lib/python3.9/site-packages/fireworks/__init__.py'>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import fireworks\n",
|
||||
"\n",
|
||||
"print(fireworks)\n",
|
||||
"import fireworks.client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9ab49327-0532-4480-804c-d066c302a322",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load\n",
|
||||
"import requests\n",
|
||||
"from langchain_community.document_loaders import PyPDFLoader\n",
|
||||
"\n",
|
||||
"# Download the PDF from a URL and save it to a temporary location\n",
|
||||
"url = \"https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf\"\n",
|
||||
"response = requests.get(url, stream=True)\n",
|
||||
"file_name = \"temp_file.pdf\"\n",
|
||||
"with open(file_name, \"wb\") as pdf:\n",
|
||||
" pdf.write(response.content)\n",
|
||||
"\n",
|
||||
"loader = PyPDFLoader(file_name)\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"# Split\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"\n",
|
||||
"# Add to vectorDB\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_fireworks.embeddings import FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(\n",
|
||||
" documents=all_splits,\n",
|
||||
" collection_name=\"rag-chroma\",\n",
|
||||
" embedding=FireworksEmbeddings(),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "4efaddd9-3dbb-455c-ba54-0ad7f2d2ce0f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
|
||||
"\n",
|
||||
"# RAG prompt\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"# LLM\n",
|
||||
"from langchain_together import Together\n",
|
||||
"\n",
|
||||
"llm = Together(\n",
|
||||
" model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n",
|
||||
" temperature=0.0,\n",
|
||||
" max_tokens=2000,\n",
|
||||
" top_k=1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# RAG chain\n",
|
||||
"chain = (\n",
|
||||
" RunnableParallel({\"context\": retriever, \"question\": RunnablePassthrough()})\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "88b1ee51-1b0f-4ebf-bb32-e50e843f0eeb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\nAnswer: The architectural details of Mixtral are as follows:\\n- Dimension (dim): 4096\\n- Number of layers (n\\\\_layers): 32\\n- Dimension of each head (head\\\\_dim): 128\\n- Hidden dimension (hidden\\\\_dim): 14336\\n- Number of heads (n\\\\_heads): 32\\n- Number of kv heads (n\\\\_kv\\\\_heads): 8\\n- Context length (context\\\\_len): 32768\\n- Vocabulary size (vocab\\\\_size): 32000\\n- Number of experts (num\\\\_experts): 8\\n- Number of top k experts (top\\\\_k\\\\_experts): 2\\n\\nMixtral is based on a transformer architecture and uses the same modifications as described in [18], with the notable exceptions that Mixtral supports a fully dense context length of 32k tokens, and the feedforward block picks from a set of 8 distinct groups of parameters. At every layer, for every token, a router network chooses two of these groups (the “experts”) to process the token and combine their output additively. This technique increases the number of parameters of a model while controlling cost and latency, as the model only uses a fraction of the total set of parameters per token. Mixtral is pretrained with multilingual data using a context size of 32k tokens. It either matches or exceeds the performance of Llama 2 70B and GPT-3.5, over several benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\"What are the Architectural details of Mixtral?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "755cf871-26b7-4e30-8b91-9ffd698470f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Trace: \n",
|
||||
"\n",
|
||||
"https://smith.langchain.com/public/935fd642-06a6-4b42-98e3-6074f93115cd/r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -73,8 +73,9 @@
|
||||
" AsyncCallbackManagerForRetrieverRun,\n",
|
||||
" CallbackManagerForRetrieverRun,\n",
|
||||
")\n",
|
||||
"from langchain.schema import BaseRetriever, Document\n",
|
||||
"from langchain_community.utilities import GoogleSerperAPIWrapper\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.retrievers import BaseRetriever\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -170,8 +170,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"with open(\"../../state_of_the_union.txt\") as f:\n",
|
||||
" state_of_the_union = f.read()\n",
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
"\n",
|
||||
"checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)\n",
|
||||
"\n",
|
||||
"checker_chain.run(text)"
|
||||
"checker_chain.invoke(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -51,7 +51,7 @@
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_math = LLMMathChain.from_llm(llm, verbose=True)\n",
|
||||
"\n",
|
||||
"llm_math.run(\"What is 13 raised to the .3432 power?\")"
|
||||
"llm_math.invoke(\"What is 13 raised to the .3432 power?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -124,7 +124,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = CharacterTextSplitter.from_tiktoken_encoder(\n",
|
||||
" chunk_size=7500, chunk_overlap=100\n",
|
||||
|
||||
@@ -20,10 +20,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -358,7 +358,7 @@
|
||||
"\n",
|
||||
"from langchain.chains.openai_functions import create_qa_with_structure_chain\n",
|
||||
"from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from pydantic import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
|
||||
648
cookbook/optimization.ipynb
Normal file
648
cookbook/optimization.ipynb
Normal file
@@ -0,0 +1,648 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c7fe38bc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Optimization\n",
|
||||
"\n",
|
||||
"This notebook goes over how to optimize chains using LangChain and [LangSmith](https://smith.langchain.com)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2f87ccd5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up\n",
|
||||
"\n",
|
||||
"We will set an environment variable for LangSmith, and load the relevant data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "236bedc5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"LANGCHAIN_PROJECT\"] = \"movie-qa\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a3fed0dd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "7cfff337",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df = pd.read_csv(\"data/imdb_top_1000.csv\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "2d20fb9c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df[\"Released_Year\"] = df[\"Released_Year\"].astype(int, errors=\"ignore\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "09fc8fe2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the initial retrieval chain\n",
|
||||
"\n",
|
||||
"We will use a self-query retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "f71e24e2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "8881ea8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"records = df.to_dict(\"records\")\n",
|
||||
"documents = [Document(page_content=d[\"Overview\"], metadata=d) for d in records]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "8f495423",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Chroma.from_documents(documents, embeddings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "31d33d62",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"metadata_field_info = [\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"Released_Year\",\n",
|
||||
" description=\"The year the movie was released\",\n",
|
||||
" type=\"int\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"Series_Title\",\n",
|
||||
" description=\"The title of the movie\",\n",
|
||||
" type=\"str\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"Genre\",\n",
|
||||
" description=\"The genre of the movie\",\n",
|
||||
" type=\"string\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"IMDB_Rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"document_content_description = \"Brief summary of a movie\"\n",
|
||||
"llm = ChatOpenAI(temperature=0)\n",
|
||||
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||
" llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "a731533b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnablePassthrough"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "05181849",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "feed4be6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"\"\"Answer the user's question based on the below information:\n",
|
||||
"\n",
|
||||
"Information:\n",
|
||||
"\n",
|
||||
"{info}\n",
|
||||
"\n",
|
||||
"Question: {question}\"\"\"\n",
|
||||
")\n",
|
||||
"generator = (prompt | ChatOpenAI() | StrOutputParser()).with_config(\n",
|
||||
" run_name=\"generator\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "eb16cc9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = (\n",
|
||||
" RunnablePassthrough.assign(info=(lambda x: x[\"question\"]) | retriever) | generator\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c70911cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run examples\n",
|
||||
"\n",
|
||||
"Run examples through the chain. This can either be manually, or using a list of examples, or production traffic"
|
||||
]
|
||||
},
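A minimal sketch of the list-of-examples route, reusing questions that appear elsewhere in this notebook (any list of strings would do):

```python
# Hypothetical list of example questions; each invocation is traced to the
# "movie-qa" LangSmith project configured above.
example_questions = [
    "what is a horror movie released in early 2000s",
    "what is a high school comedy released in early 2000s",
    "what are good action movies made before 2000 but after 1997?",
]

for question in example_questions:
    chain.invoke({"question": question})
```

Since the chain is a runnable, `chain.batch([{"question": q} for q in example_questions])` should also work if you prefer to run the examples concurrently.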
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "19a88d13",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'One of the horror movies released in the early 2000s is \"The Ring\" (2002), directed by Gore Verbinski.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke({\"question\": \"what is a horror movie released in early 2000s\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17f9cdae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Annotate\n",
|
||||
"\n",
|
||||
"Now, go to LangSmitha and annotate those examples as correct or incorrect"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5e211da6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Dataset\n",
|
||||
"\n",
|
||||
"We can now create a dataset from those runs.\n",
|
||||
"\n",
|
||||
"What we will do is find the runs marked as correct, then grab the sub-chains from them. Specifically, the query generator sub chain and the final generation step"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "e4024267",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langsmith import Client\n",
|
||||
"\n",
|
||||
"client = Client()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "3814efc5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"14"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runs = list(\n",
|
||||
" client.list_runs(\n",
|
||||
" project_name=\"movie-qa\",\n",
|
||||
" execution_order=1,\n",
|
||||
" filter=\"and(eq(feedback_key, 'correctness'), eq(feedback_score, 1))\",\n",
|
||||
" )\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"len(runs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "3eb123e0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"gen_runs = []\n",
|
||||
"query_runs = []\n",
|
||||
"for r in runs:\n",
|
||||
" gen_runs.extend(\n",
|
||||
" list(\n",
|
||||
" client.list_runs(\n",
|
||||
" project_name=\"movie-qa\",\n",
|
||||
" filter=\"eq(name, 'generator')\",\n",
|
||||
" trace_id=r.trace_id,\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" query_runs.extend(\n",
|
||||
" list(\n",
|
||||
" client.list_runs(\n",
|
||||
" project_name=\"movie-qa\",\n",
|
||||
" filter=\"eq(name, 'query_constructor')\",\n",
|
||||
" trace_id=r.trace_id,\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "a4397026",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'what is a high school comedy released in early 2000s'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runs[0].inputs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "3fa6ad2a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output': 'One high school comedy released in the early 2000s is \"Mean Girls\" starring Lindsay Lohan, Rachel McAdams, and Tina Fey.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"runs[0].outputs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "1fda5b4b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'query': 'what is a high school comedy released in early 2000s'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query_runs[0].inputs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "1a1a51e6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output': {'query': 'high school comedy',\n",
|
||||
" 'filter': {'operator': 'and',\n",
|
||||
" 'arguments': [{'comparator': 'eq', 'attribute': 'Genre', 'value': 'comedy'},\n",
|
||||
" {'operator': 'and',\n",
|
||||
" 'arguments': [{'comparator': 'gte',\n",
|
||||
" 'attribute': 'Released_Year',\n",
|
||||
" 'value': 2000},\n",
|
||||
" {'comparator': 'lt', 'attribute': 'Released_Year', 'value': 2010}]}]}}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query_runs[0].outputs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "e9d9966b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'question': 'what is a high school comedy released in early 2000s',\n",
|
||||
" 'info': []}"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"gen_runs[0].inputs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "bc113f3d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output': 'One high school comedy released in the early 2000s is \"Mean Girls\" starring Lindsay Lohan, Rachel McAdams, and Tina Fey.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"gen_runs[0].outputs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6cca74e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create datasets\n",
|
||||
"\n",
|
||||
"We can now create datasets for the query generation and final generation step.\n",
|
||||
"We do this so that (1) we can inspect the datapoints, (2) we can edit them if needed, (3) we can add to them over time"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "69966f0e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"client.create_dataset(\"movie-query_constructor\")\n",
|
||||
"\n",
|
||||
"inputs = [r.inputs for r in query_runs]\n",
|
||||
"outputs = [r.outputs for r in query_runs]\n",
|
||||
"\n",
|
||||
"client.create_examples(\n",
|
||||
" inputs=inputs, outputs=outputs, dataset_name=\"movie-query_constructor\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "7e15770e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"client.create_dataset(\"movie-generator\")\n",
|
||||
"\n",
|
||||
"inputs = [r.inputs for r in gen_runs]\n",
|
||||
"outputs = [r.outputs for r in gen_runs]\n",
|
||||
"\n",
|
||||
"client.create_examples(inputs=inputs, outputs=outputs, dataset_name=\"movie-generator\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "61cf9bcd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use as few shot examples\n",
|
||||
"\n",
|
||||
"We can now pull down a dataset and use them as few shot examples in a future chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "d9c79173",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"examples = list(client.list_examples(dataset_name=\"movie-query_constructor\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "a1771dd0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def filter_to_string(_filter):\n",
|
||||
" if \"operator\" in _filter:\n",
|
||||
" args = [filter_to_string(f) for f in _filter[\"arguments\"]]\n",
|
||||
" return f\"{_filter['operator']}({','.join(args)})\"\n",
|
||||
" else:\n",
|
||||
" comparator = _filter[\"comparator\"]\n",
|
||||
" attribute = json.dumps(_filter[\"attribute\"])\n",
|
||||
" value = json.dumps(_filter[\"value\"])\n",
|
||||
" return f\"{comparator}({attribute}, {value})\""
|
||||
]
|
||||
},
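As a quick sanity check, here is a hypothetical call to `filter_to_string` on a filter dict shaped like the `query_constructor` output shown earlier; the expected output in the comment follows directly from the function above:

```python
sample_filter = {
    "operator": "and",
    "arguments": [
        {"comparator": "eq", "attribute": "Genre", "value": "comedy"},
        {
            "operator": "and",
            "arguments": [
                {"comparator": "gte", "attribute": "Released_Year", "value": 2000},
                {"comparator": "lt", "attribute": "Released_Year", "value": 2010},
            ],
        },
    ],
}

# Prints: and(eq("Genre", "comedy"),and(gte("Released_Year", 2000),lt("Released_Year", 2010)))
print(filter_to_string(sample_filter))
```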
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "e67a3530",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_examples = []\n",
|
||||
"\n",
|
||||
"for e in examples:\n",
|
||||
" if \"filter\" in e.outputs[\"output\"]:\n",
|
||||
" string_filter = filter_to_string(e.outputs[\"output\"][\"filter\"])\n",
|
||||
" else:\n",
|
||||
" string_filter = \"NO_FILTER\"\n",
|
||||
" model_examples.append(\n",
|
||||
" (\n",
|
||||
" e.inputs[\"query\"],\n",
|
||||
" {\"query\": e.outputs[\"output\"][\"query\"], \"filter\": string_filter},\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
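For reference, assuming the dataset contains the high-school-comedy run shown earlier, one entry of `model_examples` would look roughly like this (the original question paired with the stringified structured query):

```python
(
    "what is a high school comedy released in early 2000s",
    {
        "query": "high school comedy",
        "filter": 'and(eq("Genre", "comedy"),and(gte("Released_Year", 2000),lt("Released_Year", 2010)))',
    },
)
```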
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 29,
|
||||
"id": "84593135",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever1 = SelfQueryRetriever.from_llm(\n",
|
||||
" llm,\n",
|
||||
" vectorstore,\n",
|
||||
" document_content_description,\n",
|
||||
" metadata_field_info,\n",
|
||||
" verbose=True,\n",
|
||||
" chain_kwargs={\"examples\": model_examples},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "4ec9bb92",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain1 = (\n",
|
||||
" RunnablePassthrough.assign(info=(lambda x: x[\"question\"]) | retriever1) | generator\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "64eb88e2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'1. \"Saving Private Ryan\" (1998) - Directed by Steven Spielberg, this war film follows a group of soldiers during World War II as they search for a missing paratrooper.\\n\\n2. \"The Matrix\" (1999) - Directed by the Wachowskis, this science fiction action film follows a computer hacker who discovers the truth about the reality he lives in.\\n\\n3. \"Lethal Weapon 4\" (1998) - Directed by Richard Donner, this action-comedy film follows two mismatched detectives as they investigate a Chinese immigrant smuggling ring.\\n\\n4. \"The Fifth Element\" (1997) - Directed by Luc Besson, this science fiction action film follows a cab driver who must protect a mysterious woman who holds the key to saving the world.\\n\\n5. \"The Rock\" (1996) - Directed by Michael Bay, this action thriller follows a group of rogue military men who take over Alcatraz and threaten to launch missiles at San Francisco.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 31,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain1.invoke(\n",
|
||||
" {\"question\": \"what are good action movies made before 2000 but after 1997?\"}\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e1ee8b55",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -59,13 +59,13 @@
|
||||
"from baidubce.auth.bce_credentials import BceCredentials\n",
|
||||
"from baidubce.bce_client_configuration import BceClientConfiguration\n",
|
||||
"from langchain.chains.retrieval_qa import RetrievalQA\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders.baiducloud_bos_directory import (\n",
|
||||
" BaiduBOSDirectoryLoader,\n",
|
||||
")\n",
|
||||
"from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n",
|
||||
"from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n",
|
||||
"from langchain_community.vectorstores import BESVectorStore"
|
||||
"from langchain_community.vectorstores import BESVectorStore\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -19,7 +19,9 @@
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"For this example, we will use Pinecone and some fake data"
|
||||
"For this example, we will use Pinecone and some fake data. To configure Pinecone, set the following environment variable:\n",
|
||||
"\n",
|
||||
"- `PINECONE_API_KEY`: Your Pinecone API key"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -29,11 +31,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pinecone\n",
|
||||
"from langchain_community.vectorstores import Pinecone\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"pinecone.init(api_key=\"...\", environment=\"...\")"
|
||||
"from langchain_pinecone import PineconeVectorStore"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -64,7 +63,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Pinecone.from_texts(\n",
|
||||
"vectorstore = PineconeVectorStore.from_texts(\n",
|
||||
" list(all_documents.values()), OpenAIEmbeddings(), index_name=\"rag-fusion\"\n",
|
||||
")"
|
||||
]
|
||||
@@ -162,7 +161,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vectorstore = Pinecone.from_existing_index(\"rag-fusion\", OpenAIEmbeddings())\n",
|
||||
"vectorstore = PineconeVectorStore.from_existing_index(\"rag-fusion\", OpenAIEmbeddings())\n",
|
||||
"retriever = vectorstore.as_retriever()"
|
||||
]
|
||||
},
|
||||
|
||||
591
cookbook/rag_with_quantized_embeddings.ipynb
Normal file
591
cookbook/rag_with_quantized_embeddings.ipynb
Normal file
@@ -0,0 +1,591 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6195da33-34c3-4ca2-943a-050b6dcbacbc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Embedding Documents using Optimized and Quantized Embedders\n",
|
||||
"\n",
|
||||
"In this tutorial, we will demo how to build a RAG pipeline, with the embedding for all documents done using Quantized Embedders.\n",
|
||||
"\n",
|
||||
"We will use a pipeline that will:\n",
|
||||
"\n",
|
||||
"* Create a document collection.\n",
|
||||
"* Embed all documents using Quantized Embedders.\n",
|
||||
"* Fetch relevant documents for our question.\n",
|
||||
"* Run an LLM answer the question.\n",
|
||||
"\n",
|
||||
"For more information about optimized models, we refer to [optimum-intel](https://github.com/huggingface/optimum-intel.git) and [IPEX](https://github.com/intel/intel-extension-for-pytorch).\n",
|
||||
"\n",
|
||||
"This tutorial is based on the [Langchain RAG tutorial here](https://towardsai.net/p/machine-learning/dense-x-retrieval-technique-in-langchain-and-llamaindex)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "26db2da5-3733-4a90-909e-6c11508ea140",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import uuid\n",
|
||||
"from pathlib import Path\n",
|
||||
"\n",
|
||||
"import langchain\n",
|
||||
"import torch\n",
|
||||
"from bs4 import BeautifulSoup as Soup\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryByteStore, LocalFileStore\n",
|
||||
"from langchain_community.document_loaders.recursive_url_loader import (\n",
|
||||
" RecursiveUrlLoader,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# noqa\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"\n",
|
||||
"# For our example, we'll load docs from the web\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa\n",
|
||||
"\n",
|
||||
"DOCSTORE_DIR = \".\"\n",
|
||||
"DOCSTORE_ID_KEY = \"doc_id\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f5ccda4e-7af5-4355-b9c4-25547edf33f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Lets first load up this paper, and split into text chunks of size 1000."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "5f4d8888-53a6-49f5-a198-da5c92419ca4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Loaded 1 documents\n",
|
||||
"Split into 73 documents\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Could add more parsing here, as it's very raw.\n",
|
||||
"loader = RecursiveUrlLoader(\n",
|
||||
" \"https://ar5iv.labs.arxiv.org/html/1706.03762\",\n",
|
||||
" max_depth=2,\n",
|
||||
" extractor=lambda x: Soup(x, \"html.parser\").text,\n",
|
||||
")\n",
|
||||
"data = loader.load()\n",
|
||||
"print(f\"Loaded {len(data)} documents\")\n",
|
||||
"\n",
|
||||
"# Split\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
"print(f\"Split into {len(all_splits)} documents\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "73e90632-2ac2-49eb-80da-ffe9ac4a278d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to embed our documents, we can use the ```QuantizedBiEncoderEmbeddings```, for efficient and fast embedding. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "9a68a6f6-332d-481e-bbea-ad763155ea36",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "89af89b48c55409b9999b8e0387fab5b",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"config.json: 0%| | 0.00/747 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "01ad1b6278194b53bf6a5a286a311864",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"pytorch_model.bin: 0%| | 0.00/45.9M [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "cb3bd1b88f7743c3b0322da3f021325c",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"inc_config.json: 0%| | 0.00/287 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"loading configuration file inc_config.json from cache at \n",
|
||||
"INCConfig {\n",
|
||||
" \"distillation\": {},\n",
|
||||
" \"neural_compressor_version\": \"2.4.1\",\n",
|
||||
" \"optimum_version\": \"1.16.2\",\n",
|
||||
" \"pruning\": {},\n",
|
||||
" \"quantization\": {\n",
|
||||
" \"dataset_num_samples\": 50,\n",
|
||||
" \"is_static\": true\n",
|
||||
" },\n",
|
||||
" \"save_onnx_model\": false,\n",
|
||||
" \"torch_version\": \"2.2.0\",\n",
|
||||
" \"transformers_version\": \"4.37.2\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"Using `INCModel` to load a TorchScript model will be deprecated in v1.15.0, to load your model please use `IPEXModel` instead.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "7439315ebcb746f5be11fe30bc7693f6",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"tokenizer_config.json: 0%| | 0.00/1.24k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "05265a3912254ce1ad43cc8086bcb0ca",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a48f4245c60744f28f37cd3a7a24d198",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"tokenizer.json: 0%| | 0.00/711k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "584a63cace934033b4ab22d3a178582a",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"special_tokens_map.json: 0%| | 0.00/125 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import QuantizedBiEncoderEmbeddings\n",
|
||||
"from langchain_core.embeddings import Embeddings\n",
|
||||
"\n",
|
||||
"model_name = \"Intel/bge-small-en-v1.5-rag-int8-static\"\n",
|
||||
"encode_kwargs = {\"normalize_embeddings\": True} # set True to compute cosine similarity\n",
|
||||
"\n",
|
||||
"model_inc = QuantizedBiEncoderEmbeddings(\n",
|
||||
" model_name=model_name,\n",
|
||||
" encode_kwargs=encode_kwargs,\n",
|
||||
" query_instruction=\"Represent this sentence for searching relevant passages: \",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "360b2837-8024-47e0-a4ba-592505a9a5c8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With our embedder in place, lets define our retriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "18bc0a73-1a13-4b2f-96ac-05a5313343b7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_multi_vector_retriever(\n",
|
||||
" docstore_id_key: str, collection_name: str, embedding_function: Embeddings\n",
|
||||
"):\n",
|
||||
" \"\"\"Create the composed retriever object.\"\"\"\n",
|
||||
" vectorstore = Chroma(\n",
|
||||
" collection_name=collection_name,\n",
|
||||
" embedding_function=embedding_function,\n",
|
||||
" )\n",
|
||||
" store = InMemoryByteStore()\n",
|
||||
"\n",
|
||||
" return MultiVectorRetriever(\n",
|
||||
" vectorstore=vectorstore,\n",
|
||||
" byte_store=store,\n",
|
||||
" id_key=docstore_id_key,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"retriever = get_multi_vector_retriever(DOCSTORE_ID_KEY, \"multi_vec_store\", model_inc)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8484078e-1bf0-4080-a354-ef23823fd6dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we divide each chunk into sub-docs:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "e12f48d4-6562-416b-8f28-342912e5756e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n",
|
||||
"id_key = \"doc_id\"\n",
|
||||
"doc_ids = [str(uuid.uuid4()) for _ in all_splits]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "a268ef5f-91c2-4d8e-87f0-53db376e6a29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sub_docs = []\n",
|
||||
"for i, doc in enumerate(all_splits):\n",
|
||||
" _id = doc_ids[i]\n",
|
||||
" _sub_docs = child_text_splitter.split_documents([doc])\n",
|
||||
" for _doc in _sub_docs:\n",
|
||||
" _doc.metadata[id_key] = _id\n",
|
||||
" sub_docs.extend(_sub_docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d84ea8f4-a5de-4d76-b44d-85e56583f489",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Lets write our documents into our new store. This will use our embedder on each document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "1af831ce-0eae-44bc-aca7-4d691063640b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Batches: 100%|██████████| 8/8 [00:00<00:00, 9.05it/s]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever.vectorstore.add_documents(sub_docs)\n",
|
||||
"retriever.docstore.mset(list(zip(doc_ids, all_splits)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "580bc212-8ecd-4d28-8656-b96fcd0d7eb6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Great! Our retriever is good to go. Lets load up an LLM, that will reason over the retrieved documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "008c992f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": []
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "cbe70583ad964ae19582b72dab396784",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n",
|
||||
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
|
||||
"\n",
|
||||
"model_id = \"Intel/neural-chat-7b-v3-3\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" model_id, device_map=\"auto\", torch_dtype=torch.bfloat16\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=100)\n",
|
||||
"\n",
|
||||
"hf = HuggingFacePipeline(pipeline=pipe)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6dd21fb2-0442-477d-aae2-9e7ee1d1d778",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, we will load up a prompt for answering questions using retrieved documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "5e582509-caaf-4920-932c-4ce16162c789",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"\n",
|
||||
"prompt = hub.pull(\"rlm/rag-prompt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5cdfcba5-7ec7-4d0a-820e-4e200643a882",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now build our pipeline:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "b74d8dfb-72bb-46da-9df9-0dc47a3ac791",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"rag_chain = {\"context\": retriever, \"question\": RunnablePassthrough()} | prompt | hf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3bc53602-86d6-420f-91b1-fc2effa7e986",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Excellent! lets ask it a question.\n",
|
||||
"We will also use a verbose and debug, to check which documents were used by the model to produce the answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "f0a92c07-53da-4e1f-b880-ee83a36ee17d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence] Entering Chain run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"input\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question>] Entering Chain run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"input\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question> > 4:chain:RunnablePassthrough] Entering Chain run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"input\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question> > 4:chain:RunnablePassthrough] [1ms] Exiting Chain run with output:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"output\": \"What is the first transduction model relying entirely on self-attention?\"\n",
|
||||
"}\n",
|
||||
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 2:chain:RunnableParallel<context,question>] [66ms] Exiting Chain run with output:\n",
|
||||
"\u001b[0m[outputs]\n",
|
||||
"\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 5:prompt:ChatPromptTemplate] Entering Prompt run with input:\n",
|
||||
"\u001b[0m[inputs]\n",
|
||||
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 5:prompt:ChatPromptTemplate] [1ms] Exiting Prompt run with output:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"prompts\",\n",
|
||||
" \"chat\",\n",
|
||||
" \"ChatPromptValue\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"messages\": [\n",
|
||||
" {\n",
|
||||
" \"lc\": 1,\n",
|
||||
" \"type\": \"constructor\",\n",
|
||||
" \"id\": [\n",
|
||||
" \"langchain\",\n",
|
||||
" \"schema\",\n",
|
||||
" \"messages\",\n",
|
||||
" \"HumanMessage\"\n",
|
||||
" ],\n",
|
||||
" \"kwargs\": {\n",
|
||||
" \"content\": \"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: What is the first transduction model relying entirely on self-attention? \\nContext: [Document(page_content='To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.\\\\nIn the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as (neural_gpu, ; NalBytenet2017, ) and (JonasFaceNet2017, ).\\\\n\\\\n\\\\n\\\\n\\\\n3 Model Architecture\\\\n\\\\nFigure 1: The Transformer - model architecture.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention.\\\\n\\\\n\\\\nFor translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles. \\\\n\\\\n\\\\nWe are excited about the future of attention-based models and plan to apply them to other tasks. We plan to extend the Transformer to problems involving input and output modalities other than text and to investigate local, restricted attention mechanisms to efficiently handle large inputs and outputs such as images, audio and video.\\\\nMaking generation less sequential is another research goals of ours.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences (bahdanau2014neural, ; structuredAttentionNetworks, ). In all but a few cases (decomposableAttnModel, ), however, such attention mechanisms are used in conjunction with a recurrent network.\\\\n\\\\n\\\\nIn this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n2 Background', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. 
We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'})] \\nAnswer:\",\n",
|
||||
" \"additional_kwargs\": {}\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 6:llm:HuggingFacePipeline] Entering LLM run with input:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"prompts\": [\n",
|
||||
" \"Human: You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: What is the first transduction model relying entirely on self-attention? \\nContext: [Document(page_content='To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.\\\\nIn the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as (neural_gpu, ; NalBytenet2017, ) and (JonasFaceNet2017, ).\\\\n\\\\n\\\\n\\\\n\\\\n3 Model Architecture\\\\n\\\\nFigure 1: The Transformer - model architecture.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention.\\\\n\\\\n\\\\nFor translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles. \\\\n\\\\n\\\\nWe are excited about the future of attention-based models and plan to apply them to other tasks. We plan to extend the Transformer to problems involving input and output modalities other than text and to investigate local, restricted attention mechanisms to efficiently handle large inputs and outputs such as images, audio and video.\\\\nMaking generation less sequential is another research goals of ours.', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences (bahdanau2014neural, ; structuredAttentionNetworks, ). In all but a few cases (decomposableAttnModel, ), however, such attention mechanisms are used in conjunction with a recurrent network.\\\\n\\\\n\\\\nIn this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n2 Background', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'}), Document(page_content='The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. 
We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the', metadata={'source': 'https://ar5iv.labs.arxiv.org/html/1706.03762', 'title': '[1706.03762] Attention Is All You Need', 'language': 'en'})] \\nAnswer:\"\n",
|
||||
" ]\n",
|
||||
"}\n",
|
||||
"\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence > 6:llm:HuggingFacePipeline] [4.34s] Exiting LLM run with output:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"generations\": [\n",
|
||||
" [\n",
|
||||
" {\n",
|
||||
" \"text\": \" The first transduction model relying entirely on self-attention is the Transformer.\",\n",
|
||||
" \"generation_info\": null,\n",
|
||||
" \"type\": \"Generation\"\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" ],\n",
|
||||
" \"llm_output\": null,\n",
|
||||
" \"run\": null\n",
|
||||
"}\n",
|
||||
"\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:RunnableSequence] [4.41s] Exiting Chain run with output:\n",
|
||||
"\u001b[0m{\n",
|
||||
" \"output\": \" The first transduction model relying entirely on self-attention is the Transformer.\"\n",
|
||||
"}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"langchain.verbose = True\n",
|
||||
"langchain.debug = True\n",
|
||||
"\n",
|
||||
"llm_res = rag_chain.invoke(\n",
|
||||
" \"What is the first transduction model relying entirely on self-attention?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"id": "023404a1-401a-46e1-8ab5-cafbc8593b04",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' The first transduction model relying entirely on self-attention is the Transformer.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_res"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0eaefd01-254a-445d-a95f-37889c126e0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Based on the retrieved documents, the answer is indeed correct :)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -51,11 +51,11 @@
|
||||
"from langchain.chains.base import Chain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.prompts.base import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_community.llms import BaseLLM\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_core.agents import AgentAction, AgentFinish\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"from pydantic import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1083,7 +1083,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import ElasticsearchStore\n",
|
||||
"from langchain_elasticsearch import ElasticsearchStore\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
|
||||
@@ -209,7 +209,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run({})"
|
||||
"chain.invoke({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"# Split\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)\n",
|
||||
|
||||
@@ -2610,7 +2610,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(docs)"
|
||||
|
||||
@@ -401,7 +401,7 @@
|
||||
")\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish"
|
||||
"from langchain_core.agents import AgentAction, AgentFinish"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
12
docker/Makefile
Normal file
12
docker/Makefile
Normal file
@@ -0,0 +1,12 @@
|
||||
# Makefile
|
||||
|
||||
build_graphdb:
|
||||
docker build --tag graphdb ./graphdb
|
||||
|
||||
start_graphdb:
|
||||
docker-compose up -d graphdb
|
||||
|
||||
down:
|
||||
docker-compose down -v --remove-orphans
|
||||
|
||||
.PHONY: build_graphdb start_graphdb down
|
||||
@@ -1,5 +1,10 @@
|
||||
# docker-compose to make it easier to spin up integration tests.
|
||||
# Services should use NON standard ports to avoid collision with
|
||||
# any existing services that might be used for development.
|
||||
# ATTENTION: When adding a service below use a non-standard port
|
||||
# increment by one from the preceding port.
|
||||
# For credentials always use `langchain` and `langchain` for the
|
||||
# username and password.
|
||||
version: "3"
|
||||
name: langchain-tests
|
||||
|
||||
@@ -15,3 +20,38 @@ services:
|
||||
- "6020:6379"
|
||||
volumes:
|
||||
- ./redis-volume:/data
|
||||
graphdb:
|
||||
image: graphdb
|
||||
ports:
|
||||
- "6021:7200"
|
||||
mongo:
|
||||
image: mongo:latest
|
||||
container_name: mongo_container
|
||||
ports:
|
||||
- "6022:27017"
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: langchain
|
||||
MONGO_INITDB_ROOT_PASSWORD: langchain
|
||||
postgres:
|
||||
image: postgres:16
|
||||
environment:
|
||||
POSTGRES_DB: langchain
|
||||
POSTGRES_USER: langchain
|
||||
POSTGRES_PASSWORD: langchain
|
||||
ports:
|
||||
- "6023:5432"
|
||||
command: |
|
||||
postgres -c log_statement=all
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"psql postgresql://langchain:langchain@localhost/langchain --command 'SELECT 1;' || exit 1",
|
||||
]
|
||||
interval: 5s
|
||||
retries: 60
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
|
||||
5
docker/graphdb/Dockerfile
Normal file
5
docker/graphdb/Dockerfile
Normal file
@@ -0,0 +1,5 @@
|
||||
FROM ontotext/graphdb:10.5.1
|
||||
RUN mkdir -p /opt/graphdb/dist/data/repositories/langchain
|
||||
COPY config.ttl /opt/graphdb/dist/data/repositories/langchain/
|
||||
COPY graphdb_create.sh /run.sh
|
||||
ENTRYPOINT bash /run.sh
|
||||
46
docker/graphdb/config.ttl
Normal file
46
docker/graphdb/config.ttl
Normal file
@@ -0,0 +1,46 @@
|
||||
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
|
||||
@prefix rep: <http://www.openrdf.org/config/repository#>.
|
||||
@prefix sr: <http://www.openrdf.org/config/repository/sail#>.
|
||||
@prefix sail: <http://www.openrdf.org/config/sail#>.
|
||||
@prefix graphdb: <http://www.ontotext.com/config/graphdb#>.
|
||||
|
||||
[] a rep:Repository ;
|
||||
rep:repositoryID "langchain" ;
|
||||
rdfs:label "" ;
|
||||
rep:repositoryImpl [
|
||||
rep:repositoryType "graphdb:SailRepository" ;
|
||||
sr:sailImpl [
|
||||
sail:sailType "graphdb:Sail" ;
|
||||
|
||||
graphdb:read-only "false" ;
|
||||
|
||||
# Inference and Validation
|
||||
graphdb:ruleset "empty" ;
|
||||
graphdb:disable-sameAs "true" ;
|
||||
graphdb:check-for-inconsistencies "false" ;
|
||||
|
||||
# Indexing
|
||||
graphdb:entity-id-size "32" ;
|
||||
graphdb:enable-context-index "false" ;
|
||||
graphdb:enablePredicateList "true" ;
|
||||
graphdb:enable-fts-index "false" ;
|
||||
graphdb:fts-indexes ("default" "iri") ;
|
||||
graphdb:fts-string-literals-index "default" ;
|
||||
graphdb:fts-iris-index "none" ;
|
||||
|
||||
# Queries and Updates
|
||||
graphdb:query-timeout "0" ;
|
||||
graphdb:throw-QueryEvaluationException-on-timeout "false" ;
|
||||
graphdb:query-limit-results "0" ;
|
||||
|
||||
# Settable in the file but otherwise hidden in the UI and in the RDF4J console
|
||||
graphdb:base-URL "http://example.org/owlim#" ;
|
||||
graphdb:defaultNS "" ;
|
||||
graphdb:imports "" ;
|
||||
graphdb:repository-type "file-repository" ;
|
||||
graphdb:storage-folder "storage" ;
|
||||
graphdb:entity-index-size "10000000" ;
|
||||
graphdb:in-memory-literal-properties "true" ;
|
||||
graphdb:enable-literal-index "true" ;
|
||||
]
|
||||
].
|
||||
28
docker/graphdb/graphdb_create.sh
Normal file
28
docker/graphdb/graphdb_create.sh
Normal file
@@ -0,0 +1,28 @@
|
||||
#! /bin/bash
|
||||
REPOSITORY_ID="langchain"
|
||||
GRAPHDB_URI="http://localhost:7200/"
|
||||
|
||||
echo -e "\nUsing GraphDB: ${GRAPHDB_URI}"
|
||||
|
||||
function startGraphDB {
|
||||
echo -e "\nStarting GraphDB..."
|
||||
exec /opt/graphdb/dist/bin/graphdb
|
||||
}
|
||||
|
||||
function waitGraphDBStart {
|
||||
echo -e "\nWaiting GraphDB to start..."
|
||||
for _ in $(seq 1 5); do
|
||||
CHECK_RES=$(curl --silent --write-out '%{http_code}' --output /dev/null ${GRAPHDB_URI}/rest/repositories)
|
||||
if [ "${CHECK_RES}" = '200' ]; then
|
||||
echo -e "\nUp and running"
|
||||
break
|
||||
fi
|
||||
sleep 30s
|
||||
echo "CHECK_RES: ${CHECK_RES}"
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
startGraphDB &
|
||||
waitGraphDBStart
|
||||
wait
|
||||
1
docs/.gitignore
vendored
Normal file
1
docs/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
/.quarto/
|
||||
1
docs/.yarnrc.yml
Normal file
1
docs/.yarnrc.yml
Normal file
@@ -0,0 +1 @@
|
||||
nodeLinker: node-modules
|
||||
@@ -1,4 +1,5 @@
|
||||
"""Configuration file for the Sphinx documentation builder."""
|
||||
|
||||
# Configuration file for the Sphinx documentation builder.
|
||||
#
|
||||
# This file only contains a selection of the most common options. For a full
|
||||
@@ -49,7 +50,7 @@ class ExampleLinksDirective(SphinxDirective):
|
||||
class_or_func_name = self.arguments[0]
|
||||
links = imported_classes.get(class_or_func_name, {})
|
||||
list_node = nodes.bullet_list()
|
||||
for doc_name, link in links.items():
|
||||
for doc_name, link in sorted(links.items()):
|
||||
item_node = nodes.list_item()
|
||||
para_node = nodes.paragraph()
|
||||
link_node = nodes.reference()
|
||||
@@ -114,8 +115,8 @@ autodoc_pydantic_field_signature_prefix = "param"
|
||||
autodoc_member_order = "groupwise"
|
||||
autoclass_content = "both"
|
||||
autodoc_typehints_format = "short"
|
||||
autodoc_typehints = "both"
|
||||
|
||||
# autodoc_typehints = "description"
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ["templates"]
|
||||
|
||||
@@ -174,3 +175,6 @@ myst_enable_extensions = ["colon_fence"]
|
||||
|
||||
# generate autosummary even if no references
|
||||
autosummary_generate = True
|
||||
|
||||
html_copy_source = False
|
||||
html_show_sourcelink = False
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import importlib
|
||||
import inspect
|
||||
import os
|
||||
import sys
|
||||
import typing
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
@@ -14,7 +15,6 @@ from pydantic import BaseModel
|
||||
ROOT_DIR = Path(__file__).parents[2].absolute()
|
||||
HERE = Path(__file__).parent
|
||||
|
||||
|
||||
ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]
|
||||
|
||||
|
||||
@@ -218,8 +218,8 @@ def _construct_doc(
|
||||
|
||||
for module in namespaces:
|
||||
_members = members_by_namespace[module]
|
||||
classes = _members["classes_"]
|
||||
functions = _members["functions"]
|
||||
classes = [el for el in _members["classes_"] if el["is_public"]]
|
||||
functions = [el for el in _members["functions"] if el["is_public"]]
|
||||
if not (classes or functions):
|
||||
continue
|
||||
section = f":mod:`{package_namespace}.{module}`"
|
||||
@@ -245,9 +245,6 @@ Classes
|
||||
"""
|
||||
|
||||
for class_ in sorted(classes, key=lambda c: c["qualified_name"]):
|
||||
if not class_["is_public"]:
|
||||
continue
|
||||
|
||||
if class_["kind"] == "TypedDict":
|
||||
template = "typeddict.rst"
|
||||
elif class_["kind"] == "enum":
|
||||
@@ -265,7 +262,7 @@ Classes
|
||||
"""
|
||||
|
||||
if functions:
|
||||
_functions = [f["qualified_name"] for f in functions if f["is_public"]]
|
||||
_functions = [f["qualified_name"] for f in functions]
|
||||
fstring = "\n ".join(sorted(_functions))
|
||||
full_doc += f"""\
|
||||
Functions
|
||||
@@ -310,7 +307,14 @@ def _package_namespace(package_name: str) -> str:
|
||||
|
||||
def _package_dir(package_name: str = "langchain") -> Path:
|
||||
"""Return the path to the directory containing the documentation."""
|
||||
if package_name in ("langchain", "experimental", "community", "core", "cli"):
|
||||
if package_name in (
|
||||
"langchain",
|
||||
"experimental",
|
||||
"community",
|
||||
"core",
|
||||
"cli",
|
||||
"text-splitters",
|
||||
):
|
||||
return ROOT_DIR / "libs" / package_name / _package_namespace(package_name)
|
||||
else:
|
||||
return (
|
||||
@@ -323,31 +327,54 @@ def _package_dir(package_name: str = "langchain") -> Path:
|
||||
|
||||
|
||||
def _get_package_version(package_dir: Path) -> str:
|
||||
with open(package_dir.parent / "pyproject.toml", "r") as f:
|
||||
pyproject = toml.load(f)
|
||||
"""Return the version of the package."""
|
||||
try:
|
||||
with open(package_dir.parent / "pyproject.toml", "r") as f:
|
||||
pyproject = toml.load(f)
|
||||
except FileNotFoundError as e:
|
||||
print(
|
||||
f"pyproject.toml not found in {package_dir.parent}.\n"
|
||||
"You are either attempting to build a directory which is not a package or "
|
||||
"the package is missing a pyproject.toml file which should be added."
|
||||
"Aborting the build."
|
||||
)
|
||||
exit(1)
|
||||
return pyproject["tool"]["poetry"]["version"]
|
||||
|
||||
|
||||
def _out_file_path(package_name: str = "langchain") -> Path:
|
||||
def _out_file_path(package_name: str) -> Path:
|
||||
"""Return the path to the file containing the documentation."""
|
||||
return HERE / f"{package_name.replace('-', '_')}_api_reference.rst"
|
||||
|
||||
|
||||
def _doc_first_line(package_name: str = "langchain") -> str:
|
||||
def _doc_first_line(package_name: str) -> str:
|
||||
"""Return the path to the file containing the documentation."""
|
||||
return f".. {package_name.replace('-', '_')}_api_reference:\n\n"
|
||||
|
||||
|
||||
def main() -> None:
|
||||
def main(dirs: Optional[list] = None) -> None:
|
||||
"""Generate the api_reference.rst file for each package."""
|
||||
for dir in os.listdir(ROOT_DIR / "libs"):
|
||||
if dir in ("cli", "partners"):
|
||||
print("Starting to build API reference files.")
|
||||
if not dirs:
|
||||
dirs = [
|
||||
dir_
|
||||
for dir_ in os.listdir(ROOT_DIR / "libs")
|
||||
if dir_ not in ("cli", "partners")
|
||||
]
|
||||
dirs += os.listdir(ROOT_DIR / "libs" / "partners")
|
||||
for dir_ in dirs:
|
||||
# Skip any hidden directories
|
||||
# Some of these could be present by mistake in the code base
|
||||
# e.g., .pytest_cache from running tests from the wrong location.
|
||||
if dir_.startswith("."):
|
||||
print("Skipping dir:", dir_)
|
||||
continue
|
||||
else:
|
||||
_build_rst_file(package_name=dir)
|
||||
for dir in os.listdir(ROOT_DIR / "libs" / "partners"):
|
||||
_build_rst_file(package_name=dir)
|
||||
print("Building package:", dir_)
|
||||
_build_rst_file(package_name=dir_)
|
||||
print("API reference files built.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
dirs = sys.argv[1:] or None
|
||||
main(dirs=dirs)
|
||||
|
||||
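With this change the generator accepts an optional list of package directories instead of always building every library. A hypothetical invocation is shown below; the module name is illustrative, since the script's file name is not visible in this hunk.

```python
# Hypothetical usage sketch: build API reference pages for selected packages only.
# `create_api_rst` stands in for the actual module name, which is not shown here.
from create_api_rst import main

# Equivalent to running the script with "langchain core" as CLI arguments;
# passing dirs=None (the default) rebuilds every non-hidden package under libs/.
main(dirs=["langchain", "core"])
```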
File diff suppressed because one or more lines are too long
@@ -43,6 +43,9 @@
|
||||
<li class="nav-item">
|
||||
<a class="sk-nav-link nav-link" href="{{ pathto('experimental_api_reference') }}">Experimental</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="sk-nav-link nav-link" href="{{ pathto('text_splitters_api_reference') }}">Text splitters</a>
|
||||
</li>
|
||||
{%- for title, pathname in partners %}
|
||||
<li class="nav-item">
|
||||
<a class="sk-nav-link nav-link nav-more-item-mobile-items" href="{{ pathto(pathname) }}">{{ title }}</a>
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
<script type="text/javascript" src="{{ pathto('_static/doctools.js', 1) }}"></script>
|
||||
<script type="text/javascript" src="{{ pathto('_static/language_data.js', 1) }}"></script>
|
||||
<script type="text/javascript" src="{{ pathto('_static/searchtools.js', 1) }}"></script>
|
||||
<!-- <script type="text/javascript" src="{{ pathto('_static/sphinx_highlight.js', 1) }}"></script> -->
|
||||
<script type="text/javascript" src="{{ pathto('_static/sphinx_highlight.js', 1) }}"></script>
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function() {
|
||||
if (!Search.out) {
|
||||
|
||||
3194
docs/data/people.yml
Normal file
3194
docs/data/people.yml
Normal file
File diff suppressed because it is too large
@@ -1,155 +1,50 @@
|
||||
# Tutorials
|
||||
|
||||
Below are links to tutorials and courses on LangChain. For written guides on common use cases for LangChain, check out the [use cases guides](/docs/use_cases).
|
||||
## Books and Handbooks
|
||||
|
||||
⛓ icon marks a new addition [last update 2024-02-06]
|
||||
- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
|
||||
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
|
||||
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
|
||||
|
||||
---------------------
|
||||
|
||||
### [LangChain](https://en.wikipedia.org/wiki/LangChain) on Wikipedia
|
||||
|
||||
### Books
|
||||
|
||||
#### [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
|
||||
|
||||
|
||||
### DeepLearning.AI courses
|
||||
by [Harrison Chase](https://en.wikipedia.org/wiki/LangChain) and [Andrew Ng](https://en.wikipedia.org/wiki/Andrew_Ng)
|
||||
- [LangChain for LLM Application Development](https://learn.deeplearning.ai/langchain)
|
||||
- [LangChain Chat with Your Data](https://learn.deeplearning.ai/langchain-chat-with-your-data)
|
||||
- [Functions, Tools and Agents with LangChain](https://learn.deeplearning.ai/functions-tools-agents-langchain)
|
||||
|
||||
### Handbook
|
||||
[LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
|
||||
|
||||
⛓ [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
|
||||
|
||||
### Short Tutorials
|
||||
[LangChain Explained in 13 Minutes | QuickStart Tutorial for Beginners](https://youtu.be/aywZrzNaKjs) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
|
||||
|
||||
[LangChain Crash Course: Build an AutoGPT app in 25 minutes](https://youtu.be/MlK6SIjcjE8) by [Nicholas Renotte](https://www.youtube.com/@NicholasRenotte)
|
||||
|
||||
[LangChain Crash Course - Build apps with language models](https://youtu.be/LbT1yp6quS8) by [Patrick Loeber](https://www.youtube.com/@patloeber)
|
||||
|
||||
⛓ [LangChain 101 Course](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb) by **Ivan Reznikov**
|
||||
|
||||
## Tutorials
|
||||
|
||||
### [LangChain for Gen AI and LLMs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F) by [James Briggs](https://www.youtube.com/@jamesbriggs)
|
||||
- #1 [Getting Started with `GPT-3` vs. Open Source LLMs](https://youtu.be/nE2skSRWTTs)
|
||||
- #2 [Prompt Templates for `GPT 3.5` and other LLMs](https://youtu.be/RflBcK0oDH0)
|
||||
- #3 [LLM Chains using `GPT 3.5` and other LLMs](https://youtu.be/S8j9Tk0lZHU)
|
||||
- [LangChain Data Loaders, Tokenizers, Chunking, and Datasets - Data Prep 101](https://youtu.be/eqOfr4AGLk8)
|
||||
- #4 [Chatbot Memory for `Chat-GPT`, `Davinci` + other LLMs](https://youtu.be/X05uK0TZozM)
|
||||
- #5 [Chat with OpenAI in LangChain](https://youtu.be/CnAgB3A5OlU)
|
||||
- #6 [Fixing LLM Hallucinations with Retrieval Augmentation in LangChain](https://youtu.be/kvdVduIJsc8)
|
||||
- #7 [LangChain Agents Deep Dive with `GPT 3.5`](https://youtu.be/jSP-gSEyVeI)
|
||||
- #8 [Create Custom Tools for Chatbots in LangChain](https://youtu.be/q-HNphrWsDE)
|
||||
- #9 [Build Conversational Agents with Vector DBs](https://youtu.be/H6bCqqw9xyI)
|
||||
- [Using NEW `MPT-7B` in Hugging Face and LangChain](https://youtu.be/DXpk9K7DgMo)
|
||||
- [`MPT-30B` Chatbot with LangChain](https://youtu.be/pnem-EhT6VI)
|
||||
- [Fine-tuning OpenAI's `GPT 3.5` for LangChain Agents](https://youtu.be/boHXgQ5eQic?si=OOOfK-GhsgZGBqSr)
|
||||
- [Chatbots with `RAG`: LangChain Full Walkthrough](https://youtu.be/LhnCsygAvzY?si=N7k6xy4RQksbWwsQ)
|
||||
### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)
|
||||
### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)
|
||||
### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)
|
||||
### [by Prompt Engineering](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr)
|
||||
### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
|
||||
### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
|
||||
|
||||
|
||||
### [LangChain 101](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5) by [Greg Kamradt (Data Indy)](https://www.youtube.com/@DataIndependent)
|
||||
- [What Is LangChain? - LangChain + `ChatGPT` Overview](https://youtu.be/_v_fgW2SkkQ)
|
||||
- [Quickstart Guide](https://youtu.be/kYRB-vJFy38)
|
||||
- [Beginner's Guide To 7 Essential Concepts](https://youtu.be/2xxziIWmaSA)
|
||||
- [Beginner's Guide To 9 Use Cases](https://youtu.be/vGP4pQdCocw)
|
||||
- [Agents Overview + Google Searches](https://youtu.be/Jq9Sf68ozk0)
|
||||
- [`OpenAI` + `Wolfram Alpha`](https://youtu.be/UijbzCIJ99g)
|
||||
- [Ask Questions On Your Custom (or Private) Files](https://youtu.be/EnT-ZTrcPrg)
|
||||
- [Connect `Google Drive Files` To `OpenAI`](https://youtu.be/IqqHqDcXLww)
|
||||
- [`YouTube Transcripts` + `OpenAI`](https://youtu.be/pNcQ5XXMgH4)
|
||||
- [Question A 300 Page Book (w/ `OpenAI` + `Pinecone`)](https://youtu.be/h0DHDp1FbmQ)
|
||||
- [Workaround `OpenAI's` Token Limit With Chain Types](https://youtu.be/f9_BWhCI4Zo)
|
||||
- [Build Your Own OpenAI + LangChain Web App in 23 Minutes](https://youtu.be/U_eV8wfMkXU)
|
||||
- [Working With The New `ChatGPT API`](https://youtu.be/e9P7FLi5Zy8)
|
||||
- [OpenAI + LangChain Wrote Me 100 Custom Sales Emails](https://youtu.be/y1pyAQM-3Bo)
|
||||
- [Structured Output From `OpenAI` (Clean Dirty Data)](https://youtu.be/KwAXfey-xQk)
|
||||
- [Connect `OpenAI` To +5,000 Tools (LangChain + `Zapier`)](https://youtu.be/7tNm0yiDigU)
|
||||
- [Use LLMs To Extract Data From Text (Expert Mode)](https://youtu.be/xZzvwR9jdPA)
|
||||
- [Extract Insights From Interview Transcripts Using LLMs](https://youtu.be/shkMOHwJ4SM)
|
||||
- [5 Levels Of LLM Summarizing: Novice to Expert](https://youtu.be/qaPMdcCqtWk)
|
||||
- [Control Tone & Writing Style Of Your LLM Output](https://youtu.be/miBG-a3FuhU)
|
||||
- [Build Your Own `AI Twitter Bot` Using LLMs](https://youtu.be/yLWLDjT01q8)
|
||||
- [ChatGPT made my interview questions for me (`Streamlit` + LangChain)](https://youtu.be/zvoAMx0WKkw)
|
||||
- [Function Calling via ChatGPT API - First Look With LangChain](https://youtu.be/0-zlUy7VUjg)
|
||||
- [Extract Topics From Video/Audio With LLMs (Topic Modeling w/ LangChain)](https://youtu.be/pEkxRQFNAs4)
|
||||
## Courses
|
||||
|
||||
### Featured courses on Deeplearning.AI
|
||||
|
||||
### [LangChain How to and guides](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ) by [Sam Witteveen](https://www.youtube.com/@samwitteveenai)
|
||||
- [LangChain Basics - LLMs & PromptTemplates with Colab](https://youtu.be/J_0qvRt4LNk)
|
||||
- [LangChain Basics - Tools and Chains](https://youtu.be/hI2BY7yl_Ac)
|
||||
- [`ChatGPT API` Announcement & Code Walkthrough with LangChain](https://youtu.be/phHqvLHCwH4)
|
||||
- [Conversations with Memory (explanation & code walkthrough)](https://youtu.be/X550Zbz_ROE)
|
||||
- [Chat with `Flan20B`](https://youtu.be/VW5LBavIfY4)
|
||||
- [Using `Hugging Face Models` locally (code walkthrough)](https://youtu.be/Kn7SX2Mx_Jk)
|
||||
- [`PAL`: Program-aided Language Models with LangChain code](https://youtu.be/dy7-LvDu-3s)
|
||||
- [Building a Summarization System with LangChain and `GPT-3` - Part 1](https://youtu.be/LNq_2s_H01Y)
|
||||
- [Building a Summarization System with LangChain and `GPT-3` - Part 2](https://youtu.be/d-yeHDLgKHw)
|
||||
- [Microsoft's `Visual ChatGPT` using LangChain](https://youtu.be/7YEiEyfPF5U)
|
||||
- [LangChain Agents - Joining Tools and Chains with Decisions](https://youtu.be/ziu87EXZVUE)
|
||||
- [Comparing LLMs with LangChain](https://youtu.be/rFNG0MIEuW0)
|
||||
- [Using `Constitutional AI` in LangChain](https://youtu.be/uoVqNFDwpX4)
|
||||
- [Talking to `Alpaca` with LangChain - Creating an Alpaca Chatbot](https://youtu.be/v6sF8Ed3nTE)
|
||||
- [Talk to your `CSV` & `Excel` with LangChain](https://youtu.be/xQ3mZhw69bc)
|
||||
- [`BabyAGI`: Discover the Power of Task-Driven Autonomous Agents!](https://youtu.be/QBcDLSE2ERA)
|
||||
- [Improve your `BabyAGI` with LangChain](https://youtu.be/DRgPyOXZ-oE)
|
||||
- [Master `PDF` Chat with LangChain - Your essential guide to queries on documents](https://youtu.be/ZzgUqFtxgXI)
|
||||
- [Using LangChain with `DuckDuckGO`, `Wikipedia` & `PythonREPL` Tools](https://youtu.be/KerHlb8nuVc)
|
||||
- [Building Custom Tools and Agents with LangChain (gpt-3.5-turbo)](https://youtu.be/biS8G8x8DdA)
|
||||
- [LangChain Retrieval QA Over Multiple Files with `ChromaDB`](https://youtu.be/3yPBVii7Ct0)
|
||||
- [LangChain Retrieval QA with Instructor Embeddings & `ChromaDB` for PDFs](https://youtu.be/cFCGUjc33aU)
|
||||
- [LangChain + Retrieval Local LLMs for Retrieval QA - No OpenAI!!!](https://youtu.be/9ISVjh8mdlA)
|
||||
- [`Camel` + LangChain for Synthetic Data & Market Research](https://youtu.be/GldMMK6-_-g)
|
||||
- [Information Extraction with LangChain & `Kor`](https://youtu.be/SW1ZdqH0rRQ)
|
||||
- [Converting a LangChain App from OpenAI to OpenSource](https://youtu.be/KUDn7bVyIfc)
|
||||
- [Using LangChain `Output Parsers` to get what you want out of LLMs](https://youtu.be/UVn2NroKQCw)
|
||||
- [Building a LangChain Custom Medical Agent with Memory](https://youtu.be/6UFtRwWnHws)
|
||||
- [Understanding `ReACT` with LangChain](https://youtu.be/Eug2clsLtFs)
|
||||
- [`OpenAI Functions` + LangChain : Building a Multi Tool Agent](https://youtu.be/4KXK6c6TVXQ)
|
||||
- [What can you do with 16K tokens in LangChain?](https://youtu.be/z2aCZBAtWXs)
|
||||
- [Tagging and Extraction - Classification using `OpenAI Functions`](https://youtu.be/a8hMgIcUEnE)
|
||||
- [HOW to Make Conversational Form with LangChain](https://youtu.be/IT93On2LB5k)
|
||||
- [`Claude-2` meets LangChain!](https://youtu.be/Hb_D3p0bK2U?si=j96Kc7oJoeRI5-iC)
|
||||
- [`PaLM 2` Meets LangChain](https://youtu.be/orPwLibLqm4?si=KgJjpEbAD9YBPqT4)
|
||||
- [`LLaMA2` with LangChain - Basics | LangChain TUTORIAL](https://youtu.be/cIRzwSXB4Rc?si=v3Hwxk1m3fksBIHN)
|
||||
- [Serving `LLaMA2` with `Replicate`](https://youtu.be/JIF4nNi26DE?si=dSazFyC4UQmaR-rJ)
|
||||
- [NEW LangChain Expression Language](https://youtu.be/ud7HJ2p3gp0?si=8pJ9O6hGbXrCX5G9)
|
||||
- [Building a RCI Chain for Agents with LangChain Expression Language](https://youtu.be/QaKM5s0TnsY?si=0miEj-o17AHcGfLG)
|
||||
- [How to Run `LLaMA-2-70B` on the `Together AI`](https://youtu.be/Tc2DHfzHeYE?si=Xku3S9dlBxWQukpe)
|
||||
- [`RetrievalQA` with `LLaMA 2 70b` & `Chroma` DB](https://youtu.be/93yueQQnqpM?si=ZMwj-eS_CGLnNMXZ)
|
||||
- [How to use `BGE Embeddings` for LangChain](https://youtu.be/sWRvSG7vL4g?si=85jnvnmTCF9YIWXI)
|
||||
- [How to use Custom Prompts for `RetrievalQA` on `LLaMA-2 7B`](https://youtu.be/PDwUKves9GY?si=sMF99TWU0p4eiK80)
|
||||
- [LangChain for LLM Application Development](https://learn.deeplearning.ai/langchain)
|
||||
- [LangChain Chat with Your Data](https://learn.deeplearning.ai/langchain-chat-with-your-data)
|
||||
- [Functions, Tools and Agents with LangChain](https://learn.deeplearning.ai/functions-tools-agents-langchain)
|
||||
- [Build LLM Apps with LangChain.js](https://learn.deeplearning.ai/courses/build-llm-apps-with-langchain-js)
|
||||
|
||||
### Online courses
|
||||
|
||||
### [LangChain](https://www.youtube.com/playlist?list=PLVEEucA9MYhOu89CX8H3MBZqayTbcCTMr) by [Prompt Engineering](https://www.youtube.com/@engineerprompt)
|
||||
- [LangChain Crash Course — All You Need to Know to Build Powerful Apps with LLMs](https://youtu.be/5-fc4Tlgmro)
|
||||
- [Working with MULTIPLE `PDF` Files in LangChain: `ChatGPT` for your Data](https://youtu.be/s5LhRdh5fu4)
|
||||
- [`ChatGPT` for YOUR OWN `PDF` files with LangChain](https://youtu.be/TLf90ipMzfE)
|
||||
- [Talk to YOUR DATA without OpenAI APIs: LangChain](https://youtu.be/wrD-fZvT6UI)
|
||||
- [LangChain: `PDF` Chat App (GUI) | `ChatGPT` for Your `PDF` FILES](https://youtu.be/RIWbalZ7sTo)
|
||||
- [`LangFlow`: Build Chatbots without Writing Code](https://youtu.be/KJ-ux3hre4s)
|
||||
- [LangChain: Giving Memory to LLMs](https://youtu.be/dxO6pzlgJiY)
|
||||
- [BEST OPEN Alternative to `OPENAI's EMBEDDINGs` for Retrieval QA: LangChain](https://youtu.be/ogEalPMUCSY)
|
||||
- [LangChain: Run Language Models Locally - `Hugging Face Models`](https://youtu.be/Xxxuw4_iCzw)
|
||||
- [Slash API Costs: Mastering Caching for LLM Applications](https://youtu.be/EQOznhaJWR0?si=AXoI7f3-SVFRvQUl)
|
||||
- [Avoid PROMPT INJECTION with `Constitutional AI` - LangChain](https://youtu.be/tyKSkPFHVX8?si=9mgcB5Y1kkotkBGB)
|
||||
- [Udemy](https://www.udemy.com/courses/search/?q=langchain)
|
||||
- [Pluralsight](https://www.pluralsight.com/search?q=langchain)
|
||||
- [Coursera](https://www.coursera.org/search?query=langchain)
|
||||
- [Maven](https://maven.com/courses?query=langchain)
|
||||
- [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)
|
||||
- [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)
|
||||
- [edX](https://www.edx.org/search?q=langchain)
|
||||
|
||||
## Short Tutorials
|
||||
|
||||
### LangChain by [Chat with data](https://www.youtube.com/@chatwithdata)
|
||||
- [LangChain Beginner's Tutorial for `Typescript`/`Javascript`](https://youtu.be/bH722QgRlhQ)
|
||||
- [`GPT-4` Tutorial: How to Chat With Multiple `PDF` Files (~1000 pages of Tesla's 10-K Annual Reports)](https://youtu.be/Ix9WIZpArm0)
|
||||
- [`GPT-4` & LangChain Tutorial: How to Chat With A 56-Page `PDF` Document (w/`Pinecone`)](https://youtu.be/ih9PBGVVOO4)
|
||||
- [LangChain & `Supabase` Tutorial: How to Build a ChatGPT Chatbot For Your Website](https://youtu.be/R2FMzcsmQY8)
|
||||
- [LangChain Agents: Build Personal Assistants For Your Data (Q&A with Harrison Chase and Mayo Oshin)](https://youtu.be/gVkF8cwfBLI)
|
||||
|
||||
|
||||
### Codebase Analysis
|
||||
- [Codebase Analysis: Langchain Agents](https://carbonated-yacht-2c5.notion.site/Codebase-Analysis-Langchain-Agents-0b0587acd50647ca88aaae7cff5df1f2)
|
||||
- [by Nicholas Renotte](https://youtu.be/MlK6SIjcjE8)
|
||||
- [by Patrick Loeber](https://youtu.be/LbT1yp6quS8)
|
||||
- [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs)
|
||||
- [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)
|
||||
|
||||
## [Documentation: Use cases](/docs/use_cases)
|
||||
|
||||
---------------------
|
||||
⛓ icon marks a new addition [last update 2024-02-06]
|
||||
|
||||
|
||||
|
||||
@@ -3,24 +3,68 @@ sidebar_position: 3
|
||||
---
|
||||
# Contribute Documentation
|
||||
|
||||
The docs directory contains Documentation and API Reference.
|
||||
LangChain documentation consists of two components:
|
||||
|
||||
Documentation is built using [Quarto](https://quarto.org) and [Docusaurus 2](https://docusaurus.io/).
|
||||
1. Main Documentation: Hosted at [python.langchain.com](https://python.langchain.com/),
|
||||
this comprehensive resource serves as the primary user-facing documentation.
|
||||
It covers a wide array of topics, including tutorials, use cases, integrations,
|
||||
and more, offering extensive guidance on building with LangChain.
|
||||
The content for this documentation lives in the `/docs` directory of the monorepo.
|
||||
2. In-code Documentation: This is documentation of the codebase itself, which is also
|
||||
used to generate the externally facing [API Reference](https://api.python.langchain.com/en/latest/langchain_api_reference.html).
|
||||
The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
|
||||
developers document their code well.
|
||||
|
||||
API Reference are largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code and are hosted by [Read the Docs](https://readthedocs.org/).
|
||||
For that reason, we ask that you add good documentation to all classes and methods.
|
||||
The main documentation is built using [Quarto](https://quarto.org) and [Docusaurus 2](https://docusaurus.io/).
|
||||
|
||||
Similar to linting, we recognize documentation can be annoying. If you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
|
||||
The `API Reference` is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/)
|
||||
from the code and is hosted by [Read the Docs](https://readthedocs.org/).
|
||||
|
||||
## Build Documentation Locally
|
||||
We appreciate all contributions to the documentation, whether it be fixing a typo,
|
||||
adding a new tutorial or example, and whether it is in the main documentation or the API Reference.
|
||||
|
||||
Similar to linting, we recognize documentation can be annoying. If you do not want
|
||||
to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
|
||||
|
||||
## 📜 Main Documentation
|
||||
|
||||
The content for the main documentation is located in the `/docs` directory of the monorepo.
|
||||
|
||||
The documentation is written using a combination of ipython notebooks (`.ipynb` files)
|
||||
and markdown (`.mdx` files). The notebooks are converted to markdown
|
||||
using [Quarto](https://quarto.org) and then built using [Docusaurus 2](https://docusaurus.io/).
|
||||
|
||||
Feel free to make contributions to the main documentation! 🥰
|
||||
|
||||
After modifying the documentation:
|
||||
|
||||
1. Run the linting and formatting commands (see below) to ensure that the documentation is well-formatted and free of errors.
|
||||
2. Optionally build the documentation locally to verify that the changes look good.
|
||||
3. Make a pull request with the changes.
|
||||
4. You can preview and verify that the changes are what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page. This will take you to a preview of the documentation changes.
|
||||
|
||||
## ⚒️ Linting and Building Documentation Locally
|
||||
|
||||
After writing up the documentation, you may want to lint and build the documentation
|
||||
locally to ensure that it looks good and is free of errors.
|
||||
|
||||
If you're unable to build it locally that's okay as well, as you will be able to
|
||||
see a preview of the documentation on the pull request page.
|
||||
|
||||
### Install dependencies
|
||||
|
||||
- [Quarto](https://quarto.org) - package that converts Jupyter notebooks (`.ipynb` files) into mdx files for serving in Docusaurus.
|
||||
- `poetry install` from the monorepo root
|
||||
- [Quarto](https://quarto.org) - package that converts Jupyter notebooks (`.ipynb` files) into mdx files for serving in Docusaurus. [Download link](https://quarto.org/docs/download/).
|
||||
|
||||
From the **monorepo root**, run the following command to install the dependencies:
|
||||
|
||||
```bash
|
||||
poetry install --with lint,docs --no-root
|
||||
```
|
||||
|
||||
### Building
|
||||
|
||||
The code that builds the documentation is located in the `/docs` directory of the monorepo.
|
||||
|
||||
In the following commands, the prefix `api_` indicates that those are operations for the API Reference.
|
||||
|
||||
Before building the documentation, it is always a good idea to clean the build directory:
|
||||
@@ -46,10 +90,9 @@ make api_docs_linkcheck
|
||||
|
||||
### Linting and Formatting
|
||||
|
||||
The docs are linted from the monorepo root. To lint the docs, run the following from there:
|
||||
The Main Documentation is linted from the **monorepo root**. To lint the main documentation, run the following from there:
|
||||
|
||||
```bash
|
||||
poetry install --with lint,typing
|
||||
make lint
|
||||
```
|
||||
|
||||
@@ -57,9 +100,73 @@ If you have formatting-related errors, you can fix them automatically with:
|
||||
|
||||
```bash
|
||||
make format
|
||||
```
|
||||
```
|
||||
|
||||
## Verify Documentation changes
|
||||
## ⌨️ In-code Documentation
|
||||
|
||||
The in-code documentation is largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code and is hosted by [Read the Docs](https://readthedocs.org/).
|
||||
|
||||
For the API reference to be useful, the codebase must be well-documented. This means that all functions, classes, and methods should have a docstring that explains what they do, what the arguments are, and what the return value is. This is a good practice in general, but it is especially important for LangChain because the API reference is the primary resource for developers to understand how to use the codebase.
|
||||
|
||||
We generally follow the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for docstrings.
|
||||
|
||||
Here is an example of a well-documented function:
|
||||
|
||||
```python
|
||||
|
||||
def my_function(arg1: int, arg2: str) -> float:
|
||||
"""This is a short description of the function. (It should be a single sentence.)
|
||||
|
||||
This is a longer description of the function. It should explain what
|
||||
the function does, what the arguments are, and what the return value is.
|
||||
It should wrap at 88 characters.
|
||||
|
||||
Examples:
|
||||
This is a section for examples of how to use the function.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
my_function(1, "hello")
|
||||
|
||||
Args:
|
||||
arg1: This is a description of arg1. We do not need to specify the type since
|
||||
it is already specified in the function signature.
|
||||
arg2: This is a description of arg2.
|
||||
|
||||
Returns:
|
||||
This is a description of the return value.
|
||||
"""
|
||||
return 3.14
|
||||
```
|
||||
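Classes and their methods follow the same Google-style convention. A minimal sketch is shown below; the class and attribute names are purely illustrative.

```python
class MyRetriever:
    """One-sentence summary of what the class is for.

    A longer description of the class, its intended use, and any behavior
    the caller should know about. As with functions, wrap at 88 characters.

    Attributes:
        top_k: Number of documents to return for each query.
        namespace: Optional namespace used to scope lookups.
    """

    def __init__(self, top_k: int = 4, namespace: str = "") -> None:
        """Initialize the retriever.

        Args:
            top_k: Number of documents to return for each query.
            namespace: Optional namespace used to scope lookups.
        """
        self.top_k = top_k
        self.namespace = namespace
```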
|
||||
### Linting and Formatting
|
||||
|
||||
The in-code documentation is linted from the directories belonging to the packages
|
||||
being documented.
|
||||
|
||||
For example, if you're working on the `langchain-community` package, you would change
|
||||
the working directory to the `langchain-community` directory:
|
||||
|
||||
```bash
|
||||
cd [root]/libs/langchain-community
|
||||
```
|
||||
|
||||
Set up a virtual environment for the package if you haven't done so already.
|
||||
|
||||
Install the dependencies for the package.
|
||||
|
||||
```bash
|
||||
poetry install --with lint
|
||||
```
|
||||
|
||||
Then you can run the following commands to lint and format the in-code documentation:
|
||||
|
||||
```bash
|
||||
make format
|
||||
make lint
|
||||
```
|
||||
|
||||
## Verify Documentation Changes
|
||||
|
||||
After pushing documentation changes to the repository, you can preview and verify that the changes are
|
||||
what you wanted by clicking the `View deployment` or `Visit Preview` buttons on the pull request `Conversation` page.
|
||||
|
||||
@@ -15,8 +15,9 @@ There are many ways to contribute to LangChain. Here are some common ways people
|
||||
- [**Documentation**](./documentation.mdx): Help improve our docs, including this one!
|
||||
- [**Code**](./code.mdx): Help us write code, fix bugs, or improve our infrastructure.
|
||||
- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools.
|
||||
- [**Discussions**](https://github.com/langchain-ai/langchain/discussions): Help answer usage questions and discuss issues with users.
|
||||
|
||||
### 🚩GitHub Issues
|
||||
### 🚩 GitHub Issues
|
||||
|
||||
Our [issues](https://github.com/langchain-ai/langchain/issues) page is kept up to date with bugs, improvements, and feature requests.
|
||||
|
||||
@@ -31,7 +32,13 @@ We will try to keep these issues as up-to-date as possible, though
|
||||
with the rapid rate of development in this field some may get out of date.
|
||||
If you notice this happening, please let us know.
|
||||
|
||||
### 🙋Getting Help
|
||||
### 💭 GitHub Discussions
|
||||
|
||||
We have a [discussions](https://github.com/langchain-ai/langchain/discussions) page where users can ask usage questions, discuss design decisions, and propose new features.
|
||||
|
||||
If you are able to help answer questions, please do so! This will allow the maintainers to spend more time focused on development and bug fixing.
|
||||
|
||||
### 🙋 Getting Help
|
||||
|
||||
Our goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please
|
||||
contact a maintainer! Not only do we want to help get you unblocked, but we also want to make sure that the process is
|
||||
|
||||
54
docs/docs/contributing/repo_structure.mdx
Normal file
54
docs/docs/contributing/repo_structure.mdx
Normal file
@@ -0,0 +1,54 @@
|
||||
---
|
||||
sidebar_position: 0.5
|
||||
---
|
||||
# Repository Structure
|
||||
|
||||
If you plan on contributing to LangChain code or documentation, it can be useful
|
||||
to understand the high-level structure of the repository.
|
||||
|
||||
LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
|
||||
|
||||
Here's the structure visualized as a tree:
|
||||
|
||||
```text
|
||||
.
|
||||
├── cookbook # Tutorials and examples
|
||||
├── docs # Contains content for the documentation here: https://python.langchain.com/
|
||||
├── libs
|
||||
│ ├── langchain # Main package
|
||||
│ │ ├── tests/unit_tests # Unit tests (present in each package not shown for brevity)
|
||||
│ │ ├── tests/integration_tests # Integration tests (present in each package not shown for brevity)
|
||||
│ ├── langchain-community # Third-party integrations
|
||||
│ ├── langchain-core # Base interfaces for key abstractions
|
||||
│ ├── langchain-experimental # Experimental components and chains
|
||||
│ ├── partners
|
||||
│ ├── langchain-partner-1
|
||||
│ ├── langchain-partner-2
|
||||
│ ├── ...
|
||||
│
|
||||
├── templates # A collection of easily deployable reference architectures for a wide variety of tasks.
|
||||
```
|
||||
|
||||
The root directory also contains the following files:
|
||||
|
||||
* `pyproject.toml`: Dependencies for building and linting the docs and cookbook.
|
||||
* `Makefile`: A file that contains shortcuts for building and linting the docs and cookbook.
|
||||
|
||||
There are other files in the root directory level, but their presence should be self-explanatory. Feel free to browse around!
|
||||
|
||||
## Documentation
|
||||
|
||||
The `/docs` directory contains the content for the documentation that is shown
|
||||
at https://python.langchain.com/ and the associated API Reference https://api.python.langchain.com/en/latest/langchain_api_reference.html.
|
||||
|
||||
See the [documentation](./documentation) guidelines to learn how to contribute to the documentation.
|
||||
|
||||
## Code
|
||||
|
||||
The `/libs` directory contains the code for the LangChain packages.
|
||||
|
||||
To learn more about how to contribute code see the following guidelines:
|
||||
|
||||
- [Code](./code.mdx): Learn how to develop in the LangChain codebase.
|
||||
- [Integrations](./integrations.mdx) to learn how to contribute to third-party integrations to langchain-community or to start a new partner package.
|
||||
- [Testing](./testing.mdx) guidelines to learn how to write tests for the packages.
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Agents\n",
|
||||
"\n",
|
||||
"You can pass a Runnable into an agent."
|
||||
"You can pass a Runnable into an agent. Make sure you have `langchainhub` installed: `pip install langchainhub`"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -98,7 +98,7 @@
|
||||
"source": [
|
||||
"Building an agent from a runnable usually involves a few things:\n",
|
||||
"\n",
|
||||
"1. Data processing for the intermediate steps. These need to represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
|
||||
"1. Data processing for the intermediate steps. These need to be represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
|
||||
"\n",
|
||||
"2. The prompt itself\n",
|
||||
"\n",
|
||||
|
||||
@@ -47,7 +47,7 @@
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain.schema import StrOutputParser\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
|
||||
@@ -169,8 +169,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import format_document\n",
|
||||
"from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string\n",
|
||||
"from langchain_core.prompts import format_document\n",
|
||||
"from langchain_core.runnables import RunnableParallel"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import StrOutputParser\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Add message history (memory)\n",
|
||||
"\n",
|
||||
"The `RunnableWithMessageHistory` let us add message history to certain types of chains.\n",
|
||||
"The `RunnableWithMessageHistory` lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it.\n",
|
||||
"\n",
|
||||
"Specifically, it can be used for any Runnable that takes as input one of\n",
|
||||
"\n",
|
||||
@@ -21,7 +21,379 @@
|
||||
"* a sequence of `BaseMessage`\n",
|
||||
"* a dict with a key that contains a sequence of `BaseMessage`\n",
|
||||
"\n",
|
||||
"Let's take a look at some examples to see how it works."
|
||||
"Let's take a look at some examples to see how it works. First we construct a runnable (which here accepts a dict as input and returns a message as output):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "2ed413b4-33a1-48ee-89b0-2d4917ec101a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_openai.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You're an assistant who's good at {ability}. Respond in 20 words or fewer\",\n",
|
||||
" ),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"runnable = prompt | model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9fd175e1-c7b8-4929-a57e-3331865fe7aa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To manage the message history, we will need:\n",
|
||||
"1. This runnable;\n",
|
||||
"2. A callable that returns an instance of `BaseChatMessageHistory`.\n",
|
||||
"\n",
|
||||
"Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using Redis and other providers. Here we demonstrate using an in-memory `ChatMessageHistory` as well as more persistent storage using `RedisChatMessageHistory`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3d83adad-9672-496d-9f25-5747e7b8c8bb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## In-memory\n",
|
||||
"\n",
|
||||
"Below we show a simple example in which the chat history lives in memory, in this case via a global Python dict.\n",
|
||||
"\n",
|
||||
"We construct a callable `get_session_history` that references this dict to return an instance of `ChatMessageHistory`. The arguments to the callable can be specified by passing a configuration to the `RunnableWithMessageHistory` at runtime. By default, the configuration parameter is expected to be a single string `session_id`. This can be adjusted via the `history_factory_config` kwarg.\n",
|
||||
"\n",
|
||||
"Using the single-parameter default:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "54348d02-d8ee-440c-bbf9-41bc0fbbc46c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
|
||||
"from langchain_core.chat_history import BaseChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"store = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = ChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"with_message_history = RunnableWithMessageHistory(\n",
|
||||
" runnable,\n",
|
||||
" get_session_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "01acb505-3fd3-4ab4-9f04-5ea07e81542e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to).\n",
|
||||
"\n",
|
||||
"When invoking this new runnable, we specify the corresponding chat history via a configuration parameter:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "01384412-f08e-4634-9edb-3f46f475b582",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Cosine is a trigonometric function that calculates the ratio of the adjacent side to the hypotenuse of a right triangle.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "954688a2-9a3f-47ee-a9e8-fa0c83e69477",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Cosine is a mathematical function used to calculate the length of a side in a right triangle.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Remembers\n",
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"input\": \"What?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "39350d7c-2641-4744-bc2a-fd6a57c4ea90",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I can help with math problems. What do you need assistance with?')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# New session_id --> does not remember.\n",
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"input\": \"What?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"def234\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d29497be-3366-408d-bbb9-d4a8bf4ef37c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The configuration parameters by which we track message histories can be customized by passing in a list of ``ConfigurableFieldSpec`` objects to the ``history_factory_config`` parameter. Below, we use two parameters: a `user_id` and `conversation_id`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "1c89daee-deff-4fdf-86a3-178f7d8ef536",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import ConfigurableFieldSpec\n",
|
||||
"\n",
|
||||
"store = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:\n",
|
||||
" if (user_id, conversation_id) not in store:\n",
|
||||
" store[(user_id, conversation_id)] = ChatMessageHistory()\n",
|
||||
" return store[(user_id, conversation_id)]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"with_message_history = RunnableWithMessageHistory(\n",
|
||||
" runnable,\n",
|
||||
" get_session_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
" history_factory_config=[\n",
|
||||
" ConfigurableFieldSpec(\n",
|
||||
" id=\"user_id\",\n",
|
||||
" annotation=str,\n",
|
||||
" name=\"User ID\",\n",
|
||||
" description=\"Unique identifier for the user.\",\n",
|
||||
" default=\"\",\n",
|
||||
" is_shared=True,\n",
|
||||
" ),\n",
|
||||
" ConfigurableFieldSpec(\n",
|
||||
" id=\"conversation_id\",\n",
|
||||
" annotation=str,\n",
|
||||
" name=\"Conversation ID\",\n",
|
||||
" description=\"Unique identifier for the conversation.\",\n",
|
||||
" default=\"\",\n",
|
||||
" is_shared=True,\n",
|
||||
" ),\n",
|
||||
" ],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "65c5622e-09b8-4f2f-8c8a-2dab0fd040fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"input\": \"Hello\"},\n",
|
||||
" config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18f1a459-3f88-4ee6-8542-76a907070dd6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Examples with runnables of different signatures\n",
|
||||
"\n",
|
||||
"The above runnable takes a dict as input and returns a BaseMessage. Below we show some alternatives."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "48eae1bf-b59d-4a61-8e62-b6dbf667e866",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Messages input, dict output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "17733d4f-3a32-4055-9d44-5d58b9446a26",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output_message': AIMessage(content=\"Simone de Beauvoir believed in the existence of free will. She argued that individuals have the ability to make choices and determine their own actions, even in the face of social and cultural constraints. She rejected the idea that individuals are purely products of their environment or predetermined by biology or destiny. Instead, she emphasized the importance of personal responsibility and the need for individuals to actively engage in creating their own lives and defining their own existence. De Beauvoir believed that freedom and agency come from recognizing one's own freedom and actively exercising it in the pursuit of personal and collective liberation.\")}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.runnables import RunnableParallel\n",
|
||||
"\n",
|
||||
"chain = RunnableParallel({\"output_message\": ChatOpenAI()})\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = ChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"with_message_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" get_session_history,\n",
|
||||
" output_messages_key=\"output_message\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"with_message_history.invoke(\n",
|
||||
" [HumanMessage(content=\"What did Simone de Beauvoir believe about free will\")],\n",
|
||||
" config={\"configurable\": {\"session_id\": \"baz\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "efb57ef5-91f9-426b-84b9-b77f071a9dd7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output_message': AIMessage(content='Simone de Beauvoir\\'s views on free will were closely aligned with those of her contemporary and partner Jean-Paul Sartre. Both de Beauvoir and Sartre were existentialist philosophers who emphasized the importance of individual freedom and the rejection of determinism. They believed that human beings have the capacity to transcend their circumstances and create their own meaning and values.\\n\\nSartre, in his famous work \"Being and Nothingness,\" argued that human beings are condemned to be free, meaning that we are burdened with the responsibility of making choices and defining ourselves in a world that lacks inherent meaning. Like de Beauvoir, Sartre believed that individuals have the ability to exercise their freedom and make choices in the face of external and internal constraints.\\n\\nWhile there may be some nuanced differences in their philosophical writings, overall, de Beauvoir and Sartre shared a similar belief in the existence of free will and the importance of individual agency in shaping one\\'s own life.')}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with_message_history.invoke(\n",
|
||||
" [HumanMessage(content=\"How did this compare to Sartre\")],\n",
|
||||
" config={\"configurable\": {\"session_id\": \"baz\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a39eac5f-a9d8-4729-be06-5e7faf0c424d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Messages input, messages output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e45bcd95-e31f-4a9a-967a-78f96e8da881",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"RunnableWithMessageHistory(\n",
|
||||
" ChatOpenAI(),\n",
|
||||
" get_session_history,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "04daa921-a2d1-40f9-8cd1-ae4e9a4163a7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Dict with single key for all messages input, messages output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "27157f15-9fb0-4167-9870-f4d7f234b3cb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"RunnableWithMessageHistory(\n",
|
||||
" itemgetter(\"input_messages\") | ChatOpenAI(),\n",
|
||||
" get_session_history,\n",
|
||||
" input_messages_key=\"input_messages\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "418ca7af-9ed9-478c-8bca-cba0de2ca61e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Persistent storage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "76799a13-d99a-4c4f-91f2-db699e40b8df",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In many cases it is preferable to persist conversation histories. `RunnableWithMessageHistory` is agnostic as to how the `get_session_history` callable retrieves its chat message histories. See [here](https://github.com/langchain-ai/langserve/blob/main/examples/chat_with_persistence_and_user/server.py) for an example using a local filesystem. Below we demonstrate how one could use Redis. Check out the [memory integrations](https://integrations.langchain.com/memory) page for implementations of chat message histories using other providers."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -29,9 +401,9 @@
|
||||
"id": "6bca45e5-35d9-4603-9ca9-6ac0ce0e35cd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"### Setup\n",
|
||||
"\n",
|
||||
"We'll use Redis to store our chat message histories and Anthropic's claude-2 model so we'll need to install the following dependencies:"
|
||||
"We'll need to install Redis if it's not installed already:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -41,28 +413,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain redis anthropic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "93776323-d6b8-4912-bb6a-867c5e655f46",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set your [Anthropic API key](https://console.anthropic.com/):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c7f56f69-d2f1-4a21-990c-b5551eb012fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
|
||||
"%pip install --upgrade --quiet redis"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -78,7 +429,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 9,
|
||||
"id": "cd6a250e-17fe-4368-a39d-1fe6b2cbde68",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -110,77 +461,32 @@
|
||||
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1a5a632e-ba9e-4488-b586-640ad5494f62",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: Dict input, message output\n",
|
||||
"\n",
|
||||
"Let's create a simple chain that takes a dict as input and returns a BaseMessage.\n",
|
||||
"\n",
|
||||
"In this case the `\"question\"` key in the input represents our input message, and the `\"history\"` key is where our historical messages will be injected."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2a150d6f-8878-4950-8634-a608c5faad56",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Optional\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_core.chat_history import BaseChatMessageHistory\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "3185edba-4eb6-4b32-80c6-577c0d19af97",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You're an assistant who's good at {ability}\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" (\"human\", \"{question}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatAnthropic(model=\"claude-2\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f9d81796-ce61-484c-89e2-6c567d5e54ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Adding message history\n",
|
||||
"\n",
|
||||
"To add message history to our original chain we wrap it in the `RunnableWithMessageHistory` class.\n",
|
||||
"\n",
|
||||
"Crucially, we also need to define a method that takes a session_id string and based on it returns a `BaseChatMessageHistory`. Given the same input, this method should return an equivalent output.\n",
|
||||
"\n",
|
||||
"In this case we'll also want to specify `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical messages to)."
|
||||
"Updating the message history implementation just requires us to define a new callable, this time returning an instance of `RedisChatMessageHistory`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 10,
|
||||
"id": "ca7c64d8-e138-4ef8-9734-f82076c47d80",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain_with_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
|
||||
" input_messages_key=\"question\",\n",
|
||||
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_message_history(session_id: str) -> RedisChatMessageHistory:\n",
|
||||
" return RedisChatMessageHistory(session_id, url=REDIS_URL)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"with_message_history = RunnableWithMessageHistory(\n",
|
||||
" runnable,\n",
|
||||
" get_message_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"history\",\n",
|
||||
")"
|
||||
]
|
||||
@@ -190,60 +496,53 @@
|
||||
"id": "37eefdec-9901-4650-b64c-d3c097ed5f4d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invoking with config\n",
|
||||
"\n",
|
||||
"Whenever we call our chain with message history, we need to include a config that contains the `session_id`\n",
|
||||
"```python\n",
|
||||
"config={\"configurable\": {\"session_id\": \"<SESSION_ID>\"}}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Given the same configuration, our chain should be pulling from the same chat message history."
|
||||
"We can invoke as before:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 11,
|
||||
"id": "a85bcc22-ca4c-4ad5-9440-f94be7318f3e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' Cosine is one of the basic trigonometric functions in mathematics. It is defined as the ratio of the adjacent side to the hypotenuse in a right triangle.\\n\\nSome key properties and facts about cosine:\\n\\n- It is denoted by cos(θ), where θ is the angle in a right triangle. \\n\\n- The cosine of an acute angle is always positive. For angles greater than 90 degrees, cosine can be negative.\\n\\n- Cosine is one of the three main trig functions along with sine and tangent.\\n\\n- The cosine of 0 degrees is 1. As the angle increases towards 90 degrees, the cosine value decreases towards 0.\\n\\n- The range of values for cosine is -1 to 1.\\n\\n- The cosine function maps angles in a circle to the x-coordinate on the unit circle.\\n\\n- Cosine is used to find adjacent side lengths in right triangles, and has many other applications in mathematics, physics, engineering and more.\\n\\n- Key cosine identities include: cos(A+B) = cosAcosB − sinAsinB and cos(2A) = cos^2(A) − sin^2(A)\\n\\nSo in summary, cosine is a fundamental trig')"
|
||||
"AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse in a right triangle.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"question\": \"What does cosine mean?\"},\n",
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"input\": \"What does cosine mean?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"foobar\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 12,
|
||||
"id": "ab29abd3-751f-41ce-a1b0-53f6b565e79d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' The inverse of the cosine function is called the arccosine or inverse cosine, often denoted as cos-1(x) or arccos(x).\\n\\nThe key properties and facts about arccosine:\\n\\n- It is defined as the angle θ between 0 and π radians whose cosine is x. So arccos(x) = θ such that cos(θ) = x.\\n\\n- The range of arccosine is 0 to π radians (0 to 180 degrees).\\n\\n- The domain of arccosine is -1 to 1. \\n\\n- arccos(cos(θ)) = θ for values of θ from 0 to π radians.\\n\\n- arccos(x) is the angle in a right triangle whose adjacent side is x and hypotenuse is 1.\\n\\n- arccos(0) = 90 degrees. As x increases from 0 to 1, arccos(x) decreases from 90 to 0 degrees.\\n\\n- arccos(1) = 0 degrees. arccos(-1) = 180 degrees.\\n\\n- The graph of y = arccos(x) is part of the unit circle, restricted to x')"
|
||||
"AIMessage(content='The inverse of cosine is the arccosine function, denoted as acos or cos^-1, which gives the angle corresponding to a given cosine value.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"question\": \"What's its inverse\"},\n",
|
||||
"with_message_history.invoke(\n",
|
||||
" {\"ability\": \"math\", \"input\": \"What's its inverse\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"foobar\"}},\n",
|
||||
")"
|
||||
]
|
||||
@@ -255,7 +554,7 @@
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"[Langsmith trace](https://smith.langchain.com/public/863a003b-7ca8-4b24-be9e-d63ec13c106e/r)\n",
|
||||
"[Langsmith trace](https://smith.langchain.com/public/bd73e122-6ec1-48b2-82df-e6483dc9cb63/r)\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
@@ -267,124 +566,13 @@
|
||||
"source": [
|
||||
"Looking at the Langsmith trace for the second call, we can see that when constructing the prompt, a \"history\" variable has been injected which is a list of two messages (our first input and first output)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "028cf151-6cd5-4533-b3cf-c8d735554647",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example: messages input, dict output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "0bb446b5-6251-45fe-a92a-4c6171473c53",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output_message': AIMessage(content=' Here is a summary of Simone de Beauvoir\\'s views on free will:\\n\\n- De Beauvoir was an existentialist philosopher and believed strongly in the concept of free will. She rejected the idea that human nature or instincts determine behavior.\\n\\n- Instead, de Beauvoir argued that human beings define their own essence or nature through their actions and choices. As she famously wrote, \"One is not born, but rather becomes, a woman.\"\\n\\n- De Beauvoir believed that while individuals are situated in certain cultural contexts and social conditions, they still have agency and the ability to transcend these situations. Freedom comes from choosing one\\'s attitude toward these constraints.\\n\\n- She emphasized the radical freedom and responsibility of the individual. We are \"condemned to be free\" because we cannot escape making choices and taking responsibility for our choices. \\n\\n- De Beauvoir felt that many people evade their freedom and responsibility by adopting rigid mindsets, ideologies, or conforming uncritically to social roles.\\n\\n- She advocated for the recognition of ambiguity in the human condition and warned against the quest for absolute rules that deny freedom and responsibility. Authentic living involves embracing ambiguity.\\n\\nIn summary, de Beauvoir promoted an existential ethics')}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_core.runnables import RunnableParallel\n",
|
||||
"\n",
|
||||
"chain = RunnableParallel({\"output_message\": ChatAnthropic(model=\"claude-2\")})\n",
|
||||
"chain_with_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
|
||||
" output_messages_key=\"output_message\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain_with_history.invoke(\n",
|
||||
" [HumanMessage(content=\"What did Simone de Beauvoir believe about free will\")],\n",
|
||||
" config={\"configurable\": {\"session_id\": \"baz\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "601ce3ff-aea8-424d-8e54-fd614256af4f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'output_message': AIMessage(content=\" There are many similarities between Simone de Beauvoir's views on free will and those of Jean-Paul Sartre, though some key differences emerge as well:\\n\\nSimilarities with Sartre:\\n\\n- Both were existentialist thinkers who rejected determinism and emphasized human freedom and responsibility.\\n\\n- They agreed that existence precedes essence - there is no predefined human nature that determines who we are.\\n\\n- Individuals must define themselves through their choices and actions. This leads to anxiety but also freedom.\\n\\n- The human condition is characterized by ambiguity and uncertainty, rather than fixed meanings/values.\\n\\n- Both felt that most people evade their freedom through self-deception, conformity, or adopting collective identities/values uncritically.\\n\\nDifferences from Sartre: \\n\\n- Sartre placed more emphasis on the burden and anguish of radical freedom. De Beauvoir focused more on its positive potential.\\n\\n- De Beauvoir critiqued Sartre's premise that human relations are necessarily conflictual. She saw more potential for mutual recognition.\\n\\n- Sartre saw the Other's gaze as a threat to freedom. De Beauvoir put more stress on how the Other's gaze can confirm\")}"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_history.invoke(\n",
|
||||
" [HumanMessage(content=\"How did this compare to Sartre\")],\n",
|
||||
" config={\"configurable\": {\"session_id\": \"baz\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b898d1b1-11e6-4d30-a8dd-cc5e45533611",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"[LangSmith trace](https://smith.langchain.com/public/f6c3e1d1-a49d-4955-a9fa-c6519df74fa7/r)\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1724292c-01c6-44bb-83e8-9cdb6bf01483",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## More examples\n",
|
||||
"\n",
|
||||
"We could also do any of the below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fd89240b-5a25-48f8-9568-5c1127f9ffad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"# messages in, messages out\n",
|
||||
"RunnableWithMessageHistory(\n",
|
||||
" ChatAnthropic(model=\"claude-2\"),\n",
|
||||
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# dict with single key for all messages in, messages out\n",
|
||||
"RunnableWithMessageHistory(\n",
|
||||
" itemgetter(\"input_messages\") | ChatAnthropic(model=\"claude-2\"),\n",
|
||||
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",
|
||||
" input_messages_key=\"input_messages\",\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -396,7 +584,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"cell_type": "raw",
|
||||
"id": "9e45e81c-e16e-4c6c-b6a3-2362e5193827",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
@@ -25,53 +25,42 @@
|
||||
"\n",
|
||||
"There are two ways to perform routing:\n",
|
||||
"\n",
|
||||
"1. Using a `RunnableBranch`.\n",
|
||||
"2. Writing custom factory function that takes the input of a previous step and returns a **runnable**. Importantly, this should return a **runnable** and NOT actually execute.\n",
|
||||
"1. Conditionally return runnables from a [`RunnableLambda`](./functions) (recommended)\n",
|
||||
"2. Using a `RunnableBranch`.\n",
|
||||
"\n",
|
||||
"We'll illustrate both methods using a two step sequence where the first step classifies an input question as being about `LangChain`, `Anthropic`, or `Other`, then routes to a corresponding prompt chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f885113d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using a RunnableBranch\n",
|
||||
"\n",
|
||||
"A `RunnableBranch` is initialized with a list of (condition, runnable) pairs and a default runnable. It selects which branch by passing each condition the input it's invoked with. It selects the first condition to evaluate to True, and runs the corresponding runnable to that condition with the input. \n",
|
||||
"\n",
|
||||
"If no provided conditions match, it runs the default runnable.\n",
|
||||
"\n",
|
||||
"Here's an example of what it looks like in action:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "1aa13c1d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ed84c59a",
|
||||
"id": "c1c6edac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example Setup\n",
|
||||
"First, let's create a chain that will identify incoming questions as being about `LangChain`, `Anthropic`, or `Other`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "3ec03886",
|
||||
"execution_count": null,
|
||||
"id": "8a8a1967",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Anthropic'"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" PromptTemplate.from_template(\n",
|
||||
" \"\"\"Given the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.\n",
|
||||
@@ -86,33 +75,14 @@
|
||||
" )\n",
|
||||
" | ChatAnthropic()\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "87ae7c1c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Anthropic'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
")\n",
|
||||
"\n",
|
||||
"chain.invoke({\"question\": \"how do I call Anthropic?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8aa0a365",
|
||||
"id": "7655555f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, let's create three sub chains:"
|
||||
@@ -120,8 +90,8 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d479962a",
|
||||
"execution_count": null,
|
||||
"id": "89d7722d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -158,101 +128,12 @@
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "593eab06",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableBranch\n",
|
||||
"\n",
|
||||
"branch = RunnableBranch(\n",
|
||||
" (lambda x: \"anthropic\" in x[\"topic\"].lower(), anthropic_chain),\n",
|
||||
" (lambda x: \"langchain\" in x[\"topic\"].lower(), langchain_chain),\n",
|
||||
" general_chain,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "752c732e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"full_chain = {\"topic\": chain, \"question\": lambda x: x[\"question\"]} | branch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "29231bb8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" As Dario Amodei told me, here are some ways to use Anthropic:\\n\\n- Sign up for an account on Anthropic's website to access tools like Claude, Constitutional AI, and Writer. \\n\\n- Use Claude for tasks like email generation, customer service chat, and QA. Claude can understand natural language prompts and provide helpful responses.\\n\\n- Use Constitutional AI if you need an AI assistant that is harmless, honest, and helpful. It is designed to be safe and aligned with human values.\\n\\n- Use Writer to generate natural language content for things like marketing copy, stories, reports, and more. Give it a topic and prompt and it will create high-quality written content.\\n\\n- Check out Anthropic's documentation and blog for tips, tutorials, examples, and announcements about new capabilities as they continue to develop their AI technology.\\n\\n- Follow Anthropic on social media or subscribe to their newsletter to stay up to date on new features and releases.\\n\\n- For most people, the easiest way to leverage Anthropic's technology is through their website - just create an account to get started!\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke({\"question\": \"how do I use Anthropic?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "c67d8733",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' As Harrison Chase told me, here is how you use LangChain:\\n\\nLangChain is an AI assistant that can have conversations, answer questions, and generate text. To use LangChain, you simply type or speak your input and LangChain will respond. \\n\\nYou can ask LangChain questions, have discussions, get summaries or explanations about topics, and request it to generate text on a subject. Some examples of interactions:\\n\\n- Ask general knowledge questions and LangChain will try to answer factually. For example \"What is the capital of France?\"\\n\\n- Have conversations on topics by taking turns speaking. You can prompt the start of a conversation by saying something like \"Let\\'s discuss machine learning\"\\n\\n- Ask for summaries or high-level explanations on subjects. For example \"Can you summarize the main themes in Shakespeare\\'s Hamlet?\" \\n\\n- Give creative writing prompts or requests to have LangChain generate text in different styles. For example \"Write a short children\\'s story about a mouse\" or \"Generate a poem in the style of Robert Frost about nature\"\\n\\n- Correct LangChain if it makes an inaccurate statement and provide the right information. This helps train it.\\n\\nThe key is interacting naturally and giving it clear prompts and requests', additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke({\"question\": \"how do I use LangChain?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "935ad949",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' 2 + 2 = 4', additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke({\"question\": \"whats 2 + 2\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6d8d042c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using a custom function\n",
|
||||
"## Using a custom function (Recommended)\n",
|
||||
"\n",
|
||||
"You can also use a custom function to route between different outputs. Here's an example:"
|
||||
]
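The full cell is continued below; as a rough sketch (assuming the `chain`, `anthropic_chain`, `langchain_chain`, and `general_chain` runnables defined above; illustrative, not necessarily the exact notebook cell), a custom routing function simply returns the sub-chain to run and is wrapped in a `RunnableLambda`:

```python
from langchain_core.runnables import RunnableLambda


def route(info):
    # Pick a sub-chain based on the classification produced by the first chain.
    if "anthropic" in info["topic"].lower():
        return anthropic_chain
    elif "langchain" in info["topic"].lower():
        return langchain_chain
    else:
        return general_chain


# Returning a runnable from the lambda causes that runnable to be invoked with the same input.
full_chain = {"topic": chain, "question": lambda x: x["question"]} | RunnableLambda(route)
```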
|
||||
@@ -350,13 +231,89 @@
|
||||
"full_chain.invoke({\"question\": \"whats 2 + 2\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5147b827",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using a RunnableBranch\n",
|
||||
"\n",
|
||||
"A `RunnableBranch` is a special type of runnable that allows you to define a set of conditions and runnables to execute based on the input. It does **not** offer anything that you can't achieve in a custom function as described above, so we recommend using a custom function instead.\n",
|
||||
"\n",
|
||||
"A `RunnableBranch` is initialized with a list of (condition, runnable) pairs and a default runnable. It selects which branch by passing each condition the input it's invoked with. It selects the first condition to evaluate to True, and runs the corresponding runnable to that condition with the input. \n",
|
||||
"\n",
|
||||
"If no provided conditions match, it runs the default runnable.\n",
|
||||
"\n",
|
||||
"Here's an example of what it looks like in action:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "46802d04",
|
||||
"id": "2a101418",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" As Dario Amodei told me, here are some ways to use Anthropic:\\n\\n- Sign up for an account on Anthropic's website to access tools like Claude, Constitutional AI, and Writer. \\n\\n- Use Claude for tasks like email generation, customer service chat, and QA. Claude can understand natural language prompts and provide helpful responses.\\n\\n- Use Constitutional AI if you need an AI assistant that is harmless, honest, and helpful. It is designed to be safe and aligned with human values.\\n\\n- Use Writer to generate natural language content for things like marketing copy, stories, reports, and more. Give it a topic and prompt and it will create high-quality written content.\\n\\n- Check out Anthropic's documentation and blog for tips, tutorials, examples, and announcements about new capabilities as they continue to develop their AI technology.\\n\\n- Follow Anthropic on social media or subscribe to their newsletter to stay up to date on new features and releases.\\n\\n- For most people, the easiest way to leverage Anthropic's technology is through their website - just create an account to get started!\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.runnables import RunnableBranch\n",
|
||||
"\n",
|
||||
"branch = RunnableBranch(\n",
|
||||
" (lambda x: \"anthropic\" in x[\"topic\"].lower(), anthropic_chain),\n",
|
||||
" (lambda x: \"langchain\" in x[\"topic\"].lower(), langchain_chain),\n",
|
||||
" general_chain,\n",
|
||||
")\n",
|
||||
"full_chain = {\"topic\": chain, \"question\": lambda x: x[\"question\"]} | branch\n",
|
||||
"full_chain.invoke({\"question\": \"how do I use Anthropic?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8d8caf9b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' As Harrison Chase told me, here is how you use LangChain:\\n\\nLangChain is an AI assistant that can have conversations, answer questions, and generate text. To use LangChain, you simply type or speak your input and LangChain will respond. \\n\\nYou can ask LangChain questions, have discussions, get summaries or explanations about topics, and request it to generate text on a subject. Some examples of interactions:\\n\\n- Ask general knowledge questions and LangChain will try to answer factually. For example \"What is the capital of France?\"\\n\\n- Have conversations on topics by taking turns speaking. You can prompt the start of a conversation by saying something like \"Let\\'s discuss machine learning\"\\n\\n- Ask for summaries or high-level explanations on subjects. For example \"Can you summarize the main themes in Shakespeare\\'s Hamlet?\" \\n\\n- Give creative writing prompts or requests to have LangChain generate text in different styles. For example \"Write a short children\\'s story about a mouse\" or \"Generate a poem in the style of Robert Frost about nature\"\\n\\n- Correct LangChain if it makes an inaccurate statement and provide the right information. This helps train it.\\n\\nThe key is interacting naturally and giving it clear prompts and requests', additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke({\"question\": \"how do I use LangChain?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "26159af7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' 2 + 2 = 4', additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"full_chain.invoke({\"question\": \"whats 2 + 2\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "bb7d49db-04d3-4399-bfe1-09f82bbe6015",
|
||||
"metadata": {},
|
||||
@@ -28,7 +27,7 @@
|
||||
"1. sync `stream` and async `astream`: a **default implementation** of streaming that streams the **final output** from the chain.\n",
|
||||
"2. async `astream_events` and async `astream_log`: these provide a way to stream both **intermediate steps** and **final output** from the chain.\n",
|
||||
"\n",
|
||||
"Let's take a look at both approaches, and try to understand a how to use them. 🥷\n",
|
||||
"Let's take a look at both approaches, and try to understand how to use them. 🥷\n",
|
||||
"\n",
|
||||
"## Using Stream\n",
|
||||
"\n",
|
||||
@@ -48,7 +47,25 @@
|
||||
"\n",
|
||||
"Large language models can take **several seconds** to generate a complete response to a query. This is far slower than the **~200-300 ms** threshold at which an application feels responsive to an end user.\n",
|
||||
"\n",
|
||||
"The key strategy to make the application feel more responsive is to show intermediate progress; e.g., to stream the output from the model **token by token**."
|
||||
"The key strategy to make the application feel more responsive is to show intermediate progress; viz., to stream the output from the model **token by token**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9eb73e8b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will show examples of streaming using the chat model from [Anthropic](https://python.langchain.com/docs/integrations/platforms/anthropic). To use the model, you will need to install the `langchain-anthropic` package. You can do this with the following command:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cd351cf4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install -qU langchain-anthropic"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -68,7 +85,7 @@
|
||||
"source": [
|
||||
"# Showing the example using anthropic, but you can use\n",
|
||||
"# your favorite chat model!\n",
|
||||
"from langchain.chat_models import ChatAnthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"model = ChatAnthropic()\n",
|
||||
"\n",
|
||||
@@ -152,7 +169,7 @@
|
||||
"We will use `StrOutputParser` to parse the output from the model. This is a simple parser that extracts the `content` field from an `AIMessageChunk`, giving us the `token` returned by the model.\n",
|
||||
"\n",
|
||||
":::{.callout-tip}\n",
|
||||
"LCEL is a *declarative* way to specify a \"program\" by chainining together different LangChain primitives. Chains created using LCEL benefit from an automatic implementation of `stream`, and `astream` allowing streaming of the final output. In fact, chains created with LCEL implement the entire standard Runnable interface.\n",
|
||||
"LCEL is a *declarative* way to specify a \"program\" by chainining together different LangChain primitives. Chains created using LCEL benefit from an automatic implementation of `stream` and `astream` allowing streaming of the final output. In fact, chains created with LCEL implement the entire standard Runnable interface.\n",
|
||||
":::"
|
||||
]
|
||||
},
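To make the tip above concrete, a minimal streaming chain along these lines (reusing the `model` defined earlier and a hypothetical joke prompt; a sketch, not necessarily the notebook's exact cell) could look like:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
parser = StrOutputParser()
chain = prompt | model | parser

# In a notebook (or any async context), stream the parsed tokens as they arrive.
async for chunk in chain.astream({"topic": "parrot"}):
    print(chunk, end="|", flush=True)
```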
|
||||
@@ -330,7 +347,6 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "cab6dca2-2027-414d-a196-2db6e3ebb8a5",
|
||||
"metadata": {},
|
||||
@@ -464,12 +480,12 @@
|
||||
"id": "6fd3e71b-439e-418f-8a8a-5232fba3d9fd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Stream just yielded the final result from that component. \n",
|
||||
"Stream just yielded the final result from that component.\n",
|
||||
"\n",
|
||||
"This is OK 🥹! Not all components have to implement streaming -- in some cases streaming is either unnecessary, difficult or just doesn't make sense.\n",
|
||||
"\n",
|
||||
":::{.callout-tip}\n",
|
||||
"An LCEL chain constructed using using non-streaming components, will still be able to stream in a lot of cases, with streaming of partial output starting after the last non-streaming step in the chain.\n",
|
||||
"An LCEL chain constructed using non-streaming components, will still be able to stream in a lot of cases, with streaming of partial output starting after the last non-streaming step in the chain.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
@@ -1397,7 +1413,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -40,26 +40,7 @@
|
||||
"id": "b99b47ec",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-core langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dbeac2b8-c441-4d8d-b313-1de0ab9c7e51",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
|
||||
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
|
||||
"output_parser = StrOutputParser()\n",
|
||||
"\n",
|
||||
"chain = prompt | model | output_parser"
|
||||
"%pip install --upgrade --quiet langchain-core langchain-openai langchain-anthropic"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -127,6 +108,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -476,7 +460,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"anthropic = ChatAnthropic(model=\"claude-2\")\n",
|
||||
"anthropic_chain = (\n",
|
||||
@@ -1010,7 +994,7 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
@@ -1070,9 +1054,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "poetry-venv"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
|
||||
@@ -14,7 +14,16 @@ This framework consists of several parts.
|
||||
- **[LangServe](/docs/langserve)**: A library for deploying LangChain chains as a REST API.
|
||||
- **[LangSmith](/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
|
||||
|
||||

|
||||
import ThemedImage from '@theme/ThemedImage';
|
||||
|
||||
<ThemedImage
|
||||
alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
|
||||
sources={{
|
||||
light: '/svg/langchain_stack.svg',
|
||||
dark: '/svg/langchain_stack_dark.svg',
|
||||
}}
|
||||
title="LangChain Framework Overview"
|
||||
/>
|
||||
|
||||
Together, these products simplify the entire application lifecycle:
|
||||
- **Develop**: Write your applications in LangChain/LangChain.js. Hit the ground running using Templates for reference.
|
||||
|
||||
@@ -58,14 +58,14 @@ LangChain enables building application that connect external sources of data and
|
||||
In this quickstart, we will walk through a few different ways of doing that.
|
||||
We will start with a simple LLM chain, which just relies on information in the prompt template to respond.
|
||||
Next, we will build a retrieval chain, which fetches data from a separate database and passes that into the prompt template.
|
||||
We will then add in chat history, to create a conversation retrieval chain. This allows you interact in a chat manner with this LLM, so it remembers previous questions.
|
||||
We will then add in chat history, to create a conversation retrieval chain. This allows you to interact in a chat manner with this LLM, so it remembers previous questions.
|
||||
Finally, we will build an agent - which utilizes an LLM to determine whether or not it needs to fetch data to answer questions.
|
||||
We will cover these at a high level, but there are a lot of details to all of these!
|
||||
We will link to relevant docs.
|
||||
|
||||
## LLM Chain
|
||||
|
||||
For this getting started guide, we will provide two options: using OpenAI (a popular model available via API) or using a local open source model.
|
||||
We'll show how to use models available via API, like OpenAI, and local open source models, using integrations like Ollama.
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="openai" label="OpenAI" default>
|
||||
@@ -99,7 +99,7 @@ llm = ChatOpenAI(openai_api_key="...")
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="local" label="Local">
|
||||
<TabItem value="local" label="Local (using Ollama)">
|
||||
|
||||
[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.
|
||||
|
||||
@@ -112,6 +112,66 @@ Then, make sure the Ollama server is running. After that, you can do:
|
||||
```python
|
||||
from langchain_community.llms import Ollama
|
||||
llm = Ollama(model="llama2")
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="anthropic" label="Anthropic">
|
||||
|
||||
First we'll need to import the LangChain x Anthropic package.
|
||||
|
||||
```shell
|
||||
pip install langchain-anthropic
|
||||
```
|
||||
|
||||
Accessing the API requires an API key, which you can get by creating an account [here](https://claude.ai/login). Once we have a key we'll want to set it as an environment variable by running:
|
||||
|
||||
```shell
|
||||
export ANTHROPIC_API_KEY="..."
|
||||
```
|
||||
|
||||
We can then initialize the model:
|
||||
|
||||
```python
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024)
|
||||
```
|
||||
|
||||
If you'd prefer not to set an environment variable you can pass the key in directly via the `anthropic_api_key` named parameter when initiating the Anthropic Chat Model class:
|
||||
|
||||
```python
|
||||
llm = ChatAnthropic(anthropic_api_key="...")
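# Once initialized, the model can be used like the other chat models in this
# guide, e.g. (a minimal usage sketch):
llm.invoke("how can langsmith help with testing?")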
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="cohere" label="Cohere">
|
||||
|
||||
First we'll need to import the Cohere SDK package.
|
||||
|
||||
```shell
|
||||
pip install cohere
|
||||
```
|
||||
|
||||
Accessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). Once we have a key we'll want to set it as an environment variable by running:
|
||||
|
||||
```shell
|
||||
export COHERE_API_KEY="..."
|
||||
```
|
||||
|
||||
We can then initialize the model:
|
||||
|
||||
```python
|
||||
from langchain_community.chat_models import ChatCohere
|
||||
|
||||
llm = ChatCohere()
|
||||
```
|
||||
|
||||
If you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere LLM class:
|
||||
|
||||
```python
|
||||
from langchain_community.chat_models import ChatCohere
|
||||
|
||||
llm = ChatCohere(cohere_api_key="...")
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
@@ -193,17 +253,17 @@ After that, we can import and use WebBaseLoader.
|
||||
|
||||
```python
|
||||
from langchain_community.document_loaders import WebBaseLoader
|
||||
loader = WebBaseLoader("https://docs.smith.langchain.com/overview")
|
||||
loader = WebBaseLoader("https://docs.smith.langchain.com/user_guide")
|
||||
|
||||
docs = loader.load()
|
||||
```
|
||||
|
||||
Next, we need to index it into a vectorstore. This requires a few components, namely an [embedding model](/docs/modules/data_connection/text_embedding) and a [vectorstore](/docs/modules/data_connection/vectorstores).
|
||||
|
||||
For embedding models, we once again provide examples for accessing via OpenAI or via local models.
|
||||
For embedding models, we once again provide examples for accessing via API or by running local models.
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="openai" label="OpenAI" default>
|
||||
<TabItem value="openai" label="OpenAI (API)" default>
|
||||
|
||||
Make sure you have the `langchain_openai` package installed and the appropriate environment variables set (these are the same as needed for the LLM).
|
||||
|
||||
@@ -214,7 +274,7 @@ embeddings = OpenAIEmbeddings()
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="local" label="Local">
|
||||
<TabItem value="local" label="Local (using Ollama)">
|
||||
|
||||
Make sure you have Ollama running (same set up as with the LLM).
|
||||
|
||||
@@ -224,6 +284,17 @@ from langchain_community.embeddings import OllamaEmbeddings
|
||||
embeddings = OllamaEmbeddings()
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem value="cohere" label="Cohere (API)" default>
|
||||
|
||||
Make sure you have the `cohere` package installed and the appropriate environment variables set (these are the same as needed for the LLM).
|
||||
|
||||
```python
|
||||
from langchain_community.embeddings import CohereEmbeddings
|
||||
|
||||
embeddings = CohereEmbeddings()
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
Now, we can use this embedding model to ingest documents into a vectorstore.
|
||||
@@ -239,7 +310,7 @@ Then we can build our index:
|
||||
|
||||
```python
|
||||
from langchain_community.vectorstores import FAISS
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
||||
|
||||
|
||||
text_splitter = RecursiveCharacterTextSplitter()
|
||||
@@ -374,7 +445,7 @@ The final thing we will create is an agent - where the LLM decides what steps to
|
||||
**NOTE: for this example we will only show how to create an agent using OpenAI models, as local models are not reliable enough yet.**
|
||||
|
||||
One of the first things to do when building an agent is to decide what tools it should have access to.
|
||||
For this example, we will give the agent access two tools:
|
||||
For this example, we will give the agent access to two tools:
|
||||
|
||||
1. The retriever we just created. This will let it easily answer questions about LangSmith
|
||||
2. A search tool. This will let it easily answer questions that require up to date information.
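
A rough sketch of wiring up these two tools (the tool name and description below are illustrative assumptions, `retriever` is the retriever built above, and the Tavily tool assumes a `TAVILY_API_KEY` is set):

```python
from langchain.tools.retriever import create_retriever_tool
from langchain_community.tools.tavily_search import TavilySearchResults

# Expose the LangSmith retriever as an agent tool (name/description are assumptions).
retriever_tool = create_retriever_tool(
    retriever,
    "langsmith_search",
    "Search for information about LangSmith.",
)

# Web search tool for questions that need up-to-date information.
search = TavilySearchResults()

tools = [retriever_tool, search]
```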
|
||||
@@ -489,7 +560,7 @@ from langchain_openai import ChatOpenAI
|
||||
from langchain_community.document_loaders import WebBaseLoader
|
||||
from langchain_openai import OpenAIEmbeddings
|
||||
from langchain_community.vectorstores import FAISS
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
||||
from langchain.tools.retriever import create_retriever_tool
|
||||
from langchain_community.tools.tavily_search import TavilySearchResults
|
||||
from langchain_openai import ChatOpenAI
|
||||
@@ -501,7 +572,7 @@ from langchain_core.messages import BaseMessage
|
||||
from langserve import add_routes
|
||||
|
||||
# 1. Load Retriever
|
||||
loader = WebBaseLoader("https://docs.smith.langchain.com/overview")
|
||||
loader = WebBaseLoader("https://docs.smith.langchain.com/user_guide")
|
||||
docs = loader.load()
|
||||
text_splitter = RecursiveCharacterTextSplitter()
|
||||
documents = text_splitter.split_documents(docs)
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
"\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.evaluation import AgentTrajectoryEvaluator\n",
|
||||
"from langchain.schema import AgentAction\n",
|
||||
"from langchain_core.agents import AgentAction\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
"\n",
|
||||
"The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), and [GPT4All](https://github.com/nomic-ai/gpt4all) underscore the demand to run LLMs locally (on your own device).\n",
|
||||
"The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), [GPT4All](https://github.com/nomic-ai/gpt4all), and [llamafile](https://github.com/Mozilla-Ocho/llamafile) underscore the demand to run LLMs locally (on your own device).\n",
|
||||
"\n",
|
||||
"This has at least two important benefits:\n",
|
||||
"\n",
|
||||
@@ -46,7 +46,8 @@
|
||||
"\n",
|
||||
"1. [`llama.cpp`](https://github.com/ggerganov/llama.cpp): C++ implementation of llama inference code with [weight optimization / quantization](https://finbarr.ca/how-is-llama-cpp-possible/)\n",
|
||||
"2. [`gpt4all`](https://docs.gpt4all.io/index.html): Optimized C backend for inference\n",
|
||||
"3. [`Ollama`](https://ollama.ai/): Bundles model weights and environment into an app that runs on device and serves the LLM \n",
|
||||
"3. [`Ollama`](https://ollama.ai/): Bundles model weights and environment into an app that runs on device and serves the LLM\n",
|
||||
"4. [`llamafile`](https://github.com/Mozilla-Ocho/llamafile): Bundles model weights and everything needed to run the model in a single file, allowing you to run the LLM locally from this file without any additional installation steps\n",
|
||||
"\n",
|
||||
"In general, these frameworks will do a few things:\n",
|
||||
"\n",
|
||||
@@ -157,7 +158,7 @@
|
||||
"\n",
|
||||
"### Running Apple silicon GPU\n",
|
||||
"\n",
|
||||
"`Ollama` will automatically utilize the GPU on Apple devices.\n",
|
||||
"`Ollama` and [`llamafile`](https://github.com/Mozilla-Ocho/llamafile?tab=readme-ov-file#gpu-support) will automatically utilize the GPU on Apple devices.\n",
|
||||
" \n",
|
||||
"Other frameworks require the user to set up the environment to utilize the Apple GPU.\n",
|
||||
"\n",
|
||||
@@ -191,7 +192,7 @@
|
||||
"\n",
|
||||
"There are various ways to gain access to quantized model weights.\n",
|
||||
"\n",
|
||||
"1. [`HuggingFace`](https://huggingface.co/TheBloke) - Many quantized model are available for download and can be run with framework such as [`llama.cpp`](https://github.com/ggerganov/llama.cpp)\n",
|
||||
"1. [`HuggingFace`](https://huggingface.co/TheBloke) - Many quantized model are available for download and can be run with framework such as [`llama.cpp`](https://github.com/ggerganov/llama.cpp). You can also download models in [`llamafile` format](https://huggingface.co/models?other=llamafile) from HuggingFace.\n",
|
||||
"2. [`gpt4all`](https://gpt4all.io/index.html) - The model explorer offers a leaderboard of metrics and associated quantized models available for download \n",
|
||||
"3. [`Ollama`](https://github.com/jmorganca/ollama) - Several models can be accessed directly via `pull`\n",
|
||||
"\n",
|
||||
@@ -428,6 +429,62 @@
|
||||
"llm(\"The first man on the moon was ... Let's think step by step\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "056854e2-5e4b-4a03-be7e-03192e5c4e1e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### llamafile\n",
|
||||
"\n",
|
||||
"One of the simplest ways to run an LLM locally is using a [llamafile](https://github.com/Mozilla-Ocho/llamafile). All you need to do is:\n",
|
||||
"\n",
|
||||
"1) Download a llamafile from [HuggingFace](https://huggingface.co/models?other=llamafile)\n",
|
||||
"2) Make the file executable\n",
|
||||
"3) Run the file\n",
|
||||
"\n",
|
||||
"llamafiles bundle model weights and a [specially-compiled](https://github.com/Mozilla-Ocho/llamafile?tab=readme-ov-file#technical-details) version of [`llama.cpp`](https://github.com/ggerganov/llama.cpp) into a single file that can run on most computers any additional dependencies. They also come with an embedded inference server that provides an [API](https://github.com/Mozilla-Ocho/llamafile/blob/main/llama.cpp/server/README.md#api-endpoints) for interacting with your model. \n",
|
||||
"\n",
|
||||
"Here's a simple bash script that shows all 3 setup steps:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"# Download a llamafile from HuggingFace\n",
|
||||
"wget https://huggingface.co/jartine/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile\n",
|
||||
"\n",
|
||||
"# Make the file executable. On Windows, instead just rename the file to end in \".exe\".\n",
|
||||
"chmod +x TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile\n",
|
||||
"\n",
|
||||
"# Start the model server. Listens at http://localhost:8080 by default.\n",
|
||||
"./TinyLlama-1.1B-Chat-v1.0.Q5_K_M.llamafile --server --nobrowser\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"After you run the above setup steps, you can use LangChain to interact with your model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "002e655c-ba18-4db3-ac7b-f33e825d14b6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\nFirstly, let's imagine the scene where Neil Armstrong stepped onto the moon. This happened in 1969. The first man on the moon was Neil Armstrong. We already know that.\\n2nd, let's take a step back. Neil Armstrong didn't have any special powers. He had to land his spacecraft safely on the moon without injuring anyone or causing any damage. If he failed to do this, he would have been killed along with all those people who were on board the spacecraft.\\n3rd, let's imagine that Neil Armstrong successfully landed his spacecraft on the moon and made it back to Earth safely. The next step was for him to be hailed as a hero by his people back home. It took years before Neil Armstrong became an American hero.\\n4th, let's take another step back. Let's imagine that Neil Armstrong wasn't hailed as a hero, and instead, he was just forgotten. This happened in the 1970s. Neil Armstrong wasn't recognized for his remarkable achievement on the moon until after he died.\\n5th, let's take another step back. Let's imagine that Neil Armstrong didn't die in the 1970s and instead, lived to be a hundred years old. This happened in 2036. In the year 2036, Neil Armstrong would have been a centenarian.\\nNow, let's think about the present. Neil Armstrong is still alive. He turned 95 years old on July 20th, 2018. If he were to die now, his achievement of becoming the first human being to set foot on the moon would remain an unforgettable moment in history.\\nI hope this helps you understand the significance and importance of Neil Armstrong's achievement on the moon!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.llms.llamafile import Llamafile\n",
|
||||
"\n",
|
||||
"llm = Llamafile()\n",
|
||||
"\n",
|
||||
"llm.invoke(\"The first man on the moon was ... Let's think step by step.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b84e543",
|
||||
@@ -611,7 +668,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"documents = [Document(page_content=document_content)]"
|
||||
]
|
||||
@@ -643,9 +643,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"# 2. Load the data: In our case data's already loaded\n",
|
||||
"# 3. Anonymize the data before indexing\n",
|
||||
@@ -879,7 +879,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"from langchain.schema import format_document\n",
|
||||
"from langchain_core.prompts import format_document\n",
|
||||
"\n",
|
||||
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
|
||||
"\n",
|
||||
|
||||
391
docs/docs/guides/structured_output.ipynb
Normal file
@@ -0,0 +1,391 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6e3f0f72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# [beta] Structured Output\n",
|
||||
"\n",
|
||||
"It is often crucial to have LLMs return structured output. This is because oftentimes the outputs of the LLMs are used in downstream applications, where specific arguments are required. Having the LLM return structured output reliably is necessary for that.\n",
|
||||
"\n",
|
||||
"There are a few different high level strategies that are used to do this:\n",
|
||||
"\n",
|
||||
"- Prompting: This is when you ask the LLM (very nicely) to return output in the desired format (JSON, XML). This is nice because it works with all LLMs. It is not nice because there is no guarantee that the LLM returns the output in the right format.\n",
|
||||
"- Function calling: This is when the LLM is fine-tuned to be able to not just generate a completion, but also generate a function call. The functions the LLM can call are generally passed as extra parameters to the model API. The function names and descriptions should be treated as part of the prompt (they usually count against token counts, and are used by the LLM to decide what to do).\n",
|
||||
"- Tool calling: A technique similar to function calling, but it allows the LLM to call multiple functions at the same time.\n",
|
||||
"- JSON mode: This is when the LLM is guaranteed to return JSON.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Different models may support different variants of these, with slightly different parameters. In order to make it easy to get LLMs to return structured output, we have added a common interface to LangChain models: `.with_structured_output`. \n",
|
||||
"\n",
|
||||
"By invoking this method (and passing in a JSON schema or a Pydantic model) the model will add whatever model parameters + output parsers are necessary to get back the structured output. There may be more than one way to do this (e.g., function calling vs JSON mode) - you can configure which method to use by passing into that method.\n",
|
||||
"\n",
|
||||
"Let's look at some examples of this in action!\n",
|
||||
"\n",
|
||||
"We will use Pydantic to easily structure the response schema."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "08029f4e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "070bf702",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Joke(BaseModel):\n",
|
||||
" setup: str = Field(description=\"The setup of the joke\")\n",
|
||||
" punchline: str = Field(description=\"The punchline to the joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98f6edfa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## OpenAI\n",
|
||||
"\n",
|
||||
"OpenAI exposes a few different ways to get structured outputs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "3fe7caf0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "deddb6d3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Function Calling\n",
|
||||
"\n",
|
||||
"By default, we will use `function_calling`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "6700994a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = ChatOpenAI()\n",
|
||||
"model_with_structure = model.with_structured_output(Joke)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "c55a61b8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup='Why was the cat sitting on the computer?', punchline='It wanted to keep an eye on the mouse!')"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_with_structure.invoke(\"Tell me a joke about cats\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39d7a555",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### JSON Mode\n",
|
||||
"\n",
|
||||
"We also support JSON mode. Note that we need to specify in the prompt the format that it should respond in."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "df0370e3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_with_structure = model.with_structured_output(Joke, method=\"json_mode\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "23844a26",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup=\"Why don't cats play poker in the jungle?\", punchline='Too many cheetahs!')"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_with_structure.invoke(\n",
|
||||
" \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f3cce9e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Fireworks\n",
|
||||
"\n",
|
||||
"[Fireworks](https://fireworks.ai/) similarly supports function calling and JSON mode for select models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ad45fdd8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_fireworks import ChatFireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36270ed5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Function Calling\n",
|
||||
"\n",
|
||||
"By default, we will use `function_calling`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "49a20847",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = ChatFireworks(model=\"accounts/fireworks/models/firefunction-v1\")\n",
|
||||
"model_with_structure = model.with_structured_output(Joke)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "e3093a6c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup=\"Why don't cats play poker in the jungle?\", punchline='Too many cheetahs!')"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_with_structure.invoke(\"Tell me a joke about cats\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ddb6b3ba",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### JSON Mode\n",
|
||||
"\n",
|
||||
"We also support JSON mode. Note that we need to specify in the prompt the format that it should respond in."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "ea0c22c1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_with_structure = model.with_structured_output(Joke, method=\"json_mode\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "649f9632",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup='Why did the dog sit in the shade?', punchline='To avoid getting burned.')"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_with_structure.invoke(\n",
|
||||
" \"Tell me a joke about dogs, respond in JSON with `setup` and `punchline` keys\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ff70609a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Mistral\n",
|
||||
"\n",
|
||||
"We also support structured output with Mistral models, although we only support function calling."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "bffd3fad",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_mistralai import ChatMistralAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "c8bd7549",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = ChatMistralAI(model=\"mistral-large-latest\")\n",
|
||||
"model_with_structure = model.with_structured_output(Joke)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "17b15816",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_with_structure.invoke(\"Tell me a joke about cats\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6bbbb698",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Together\n",
|
||||
"\n",
|
||||
"Since [TogetherAI](https://www.together.ai/) is just a drop in replacement for OpenAI, we can just use the OpenAI integration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "9b9617e3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "90549664",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = ChatOpenAI(\n",
|
||||
" base_url=\"https://api.together.xyz/v1\",\n",
|
||||
" api_key=os.environ[\"TOGETHER_API_KEY\"],\n",
|
||||
" model=\"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n",
|
||||
")\n",
|
||||
"model_with_structure = model.with_structured_output(Joke)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "01da39be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Joke(setup='Why did the cat sit on the computer?', punchline='To keep an eye on the mouse!')"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model_with_structure.invoke(\"Tell me a joke about cats\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3066b2af",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -177,7 +177,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import ArgillaCallbackHandler\n",
|
||||
"from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler\n",
|
||||
"\n",
|
||||
"argilla_callback = ArgillaCallbackHandler(\n",
|
||||
" dataset_name=\"langchain-dataset\",\n",
|
||||
@@ -213,7 +213,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n",
|
||||
"from langchain_core.callbacks.stdout import StdOutCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"argilla_callback = ArgillaCallbackHandler(\n",
|
||||
@@ -277,9 +277,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_core.callbacks.stdout import StdOutCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"argilla_callback = ArgillaCallbackHandler(\n",
|
||||
@@ -361,7 +361,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
|
||||
"from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n",
|
||||
"from langchain_core.callbacks.stdout import StdOutCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"argilla_callback = ArgillaCallbackHandler(\n",
|
||||
|
||||
@@ -97,7 +97,7 @@
|
||||
"if \"LANGCHAIN_COMET_TRACING\" in os.environ:\n",
|
||||
" del os.environ[\"LANGCHAIN_COMET_TRACING\"]\n",
|
||||
"\n",
|
||||
"from langchain.callbacks.tracers.comet import CometTracer\n",
|
||||
"from langchain_community.callbacks.tracers.comet import CometTracer\n",
|
||||
"\n",
|
||||
"tracer = CometTracer()\n",
|
||||
"\n",
|
||||
@@ -130,7 +130,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -118,7 +118,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.confident_callback import DeepEvalCallbackHandler\n",
|
||||
"from langchain_community.callbacks.confident_callback import DeepEvalCallbackHandler\n",
|
||||
"\n",
|
||||
"deepeval_callback = DeepEvalCallbackHandler(\n",
|
||||
" implementation_name=\"langchainQuickstart\", metrics=[answer_relevancy_metric]\n",
|
||||
@@ -215,10 +215,10 @@
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n",
|
||||
"\n",
|
||||
@@ -296,7 +296,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.3"
|
||||
"version": "3.10.12"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -65,6 +65,23 @@
|
||||
"Ensure you have installed the `context-python` package before using the handler."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2024-03-06T19:05:26.534124Z",
|
||||
"iopub.status.busy": "2024-03-06T19:05:26.533924Z",
|
||||
"iopub.status.idle": "2024-03-06T19:05:26.798727Z",
|
||||
"shell.execute_reply": "2024-03-06T19:05:26.798135Z",
|
||||
"shell.execute_reply.started": "2024-03-06T19:05:26.534109Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks.context_callback import ContextCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
@@ -73,8 +90,6 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.callbacks import ContextCallbackHandler\n",
|
||||
"\n",
|
||||
"token = os.environ[\"CONTEXT_API_TOKEN\"]\n",
|
||||
"\n",
|
||||
"context_callback = ContextCallbackHandler(token)"
|
||||
@@ -99,7 +114,6 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.callbacks import ContextCallbackHandler\n",
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
@@ -155,7 +169,6 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain.callbacks import ContextCallbackHandler\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.prompts.chat import (\n",
|
||||
|
||||
215
docs/docs/integrations/callbacks/fiddler.ipynb
Normal file
@@ -0,0 +1,215 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0cebf93b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Fiddler\n",
|
||||
"\n",
|
||||
">[Fiddler](https://www.fiddler.ai/) is the pioneer in enterprise Generative and Predictive system ops, offering a unified platform that enables Data Science, MLOps, Risk, Compliance, Analytics, and other LOB teams to monitor, explain, analyze, and improve ML deployments at enterprise scale. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "38d746c2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 1. Installation and Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e0151955",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#!pip install langchain langchain-community langchain-openai fiddler-client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5662f2e5-d510-4eef-b44b-fa929e5b4ad4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Fiddler connection details "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "64fac323",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"*Before you can add information about your model with Fiddler*\n",
|
||||
"\n",
|
||||
"1. The URL you're using to connect to Fiddler\n",
|
||||
"2. Your organization ID\n",
|
||||
"3. Your authorization token\n",
|
||||
"\n",
|
||||
"These can be found by navigating to the *Settings* page of your Fiddler environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f6f8b73e-d350-40f0-b7a4-fb1e68a65a22",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"URL = \"\" # Your Fiddler instance URL, Make sure to include the full URL (including https://). For example: https://demo.fiddler.ai\n",
|
||||
"ORG_NAME = \"\"\n",
|
||||
"AUTH_TOKEN = \"\" # Your Fiddler instance auth token\n",
|
||||
"\n",
|
||||
"# Fiddler project and model names, used for model registration\n",
|
||||
"PROJECT_NAME = \"\"\n",
|
||||
"MODEL_NAME = \"\" # Model name in Fiddler"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0645805a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Create a fiddler callback handler instance"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "13de4f9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks.fiddler_callback import FiddlerCallbackHandler\n",
|
||||
"\n",
|
||||
"fiddler_handler = FiddlerCallbackHandler(\n",
|
||||
" url=URL,\n",
|
||||
" org=ORG_NAME,\n",
|
||||
" project=PROJECT_NAME,\n",
|
||||
" model=MODEL_NAME,\n",
|
||||
" api_key=AUTH_TOKEN,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2276368e-f1dc-46be-afe3-18796e7a66f2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example 1 : Basic Chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c9de0fd1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"# Note : Make sure openai API key is set in the environment variable OPENAI_API_KEY\n",
|
||||
"llm = OpenAI(temperature=0, streaming=True, callbacks=[fiddler_handler])\n",
|
||||
"output_parser = StrOutputParser()\n",
|
||||
"\n",
|
||||
"chain = llm | output_parser\n",
|
||||
"\n",
|
||||
"# Invoke the chain. Invocation will be logged to Fiddler, and metrics automatically generated\n",
|
||||
"chain.invoke(\"How far is moon from earth?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "309bde0b-e1ce-446c-98ac-3690c26a2676",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Few more invocations\n",
|
||||
"chain.invoke(\"What is the temperature on Mars?\")\n",
|
||||
"chain.invoke(\"How much is 2 + 200000?\")\n",
|
||||
"chain.invoke(\"Which movie won the oscars this year?\")\n",
|
||||
"chain.invoke(\"Can you write me a poem about insomnia?\")\n",
|
||||
"chain.invoke(\"How are you doing today?\")\n",
|
||||
"chain.invoke(\"What is the meaning of life?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "48fa4782-c867-4510-9430-4ffa3de3b5eb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example 2 : Chain with prompt templates"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2aa2c220-8946-4844-8d3c-8f69d744d13f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" FewShotChatMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"examples = [\n",
|
||||
" {\"input\": \"2+2\", \"output\": \"4\"},\n",
|
||||
" {\"input\": \"2+3\", \"output\": \"5\"},\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"example_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" (\"ai\", \"{output}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"few_shot_prompt = FewShotChatMessagePromptTemplate(\n",
|
||||
" example_prompt=example_prompt,\n",
|
||||
" examples=examples,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"final_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a wondrous wizard of math.\"),\n",
|
||||
" few_shot_prompt,\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Note : Make sure openai API key is set in the environment variable OPENAI_API_KEY\n",
|
||||
"llm = OpenAI(temperature=0, streaming=True, callbacks=[fiddler_handler])\n",
|
||||
"\n",
|
||||
"chain = final_prompt | llm\n",
|
||||
"\n",
|
||||
"# Invoke the chain. Invocation will be logged to Fiddler, and metrics automatically generated\n",
|
||||
"chain.invoke({\"input\": \"What's the square of a triangle?\"})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -39,6 +39,16 @@
|
||||
"%pip install --upgrade --quiet tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3c9d9424-0879-4f14-91e5-1292e22820d7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks.infino_callback import InfinoCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
@@ -53,7 +63,6 @@
|
||||
"import matplotlib.dates as md\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from infinopy import InfinoClient\n",
|
||||
"from langchain.callbacks import InfinoCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -160,6 +160,25 @@
|
||||
"You can collect input LLM prompts and output responses in a LabelStudio project, connecting it via `LabelStudioCallbackHandler`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2024-03-06T19:07:34.462103Z",
|
||||
"iopub.status.busy": "2024-03-06T19:07:34.461651Z",
|
||||
"iopub.status.idle": "2024-03-06T19:07:34.661936Z",
|
||||
"shell.execute_reply": "2024-03-06T19:07:34.661284Z",
|
||||
"shell.execute_reply.started": "2024-03-06T19:07:34.462067Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks.labelstudio_callback import (\n",
|
||||
" LabelStudioCallbackHandler,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -170,7 +189,6 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import LabelStudioCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(\n",
|
||||
@@ -241,8 +259,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import LabelStudioCallbackHandler\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat_llm = ChatOpenAI(\n",
|
||||
|
||||
@@ -19,7 +19,7 @@ export LLMONITOR_APP_ID="..."
|
||||
If you'd prefer not to set an environment variable, you can pass the key directly when initializing the callback handler:
|
||||
|
||||
```python
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
|
||||
|
||||
handler = LLMonitorCallbackHandler(app_id="...")
|
||||
```
|
||||
@@ -29,7 +29,6 @@ handler = LLMonitorCallbackHandler(app_id="...")
|
||||
```python
|
||||
from langchain_openai import OpenAI
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
|
||||
handler = LLMonitorCallbackHandler()
|
||||
|
||||
@@ -53,9 +52,9 @@ Example:
|
||||
|
||||
```python
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain.schema import SystemMessage, HumanMessage
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
|
||||
from langchain_core.messages import SystemMessage, HumanMessage
|
||||
from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
|
||||
llm = ChatOpenAI(temperature=0)
|
||||
|
||||
@@ -86,7 +85,8 @@ Another example:
|
||||
```python
|
||||
from langchain.agents import load_tools, initialize_agent, AgentType
|
||||
from langchain_openai import OpenAI
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
|
||||
|
||||
|
||||
handler = LLMonitorCallbackHandler()
|
||||
|
||||
@@ -104,7 +104,7 @@ agent.run(
|
||||
User tracking allows you to identify your users, track their cost, conversations and more.
|
||||
|
||||
```python
|
||||
from langchain.callbacks.llmonitor_callback import LLMonitorCallbackHandler, identify
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler, identify
|
||||
|
||||
with identify("user-123"):
|
||||
llm("Tell me a joke")
|
||||
|
||||
@@ -68,14 +68,32 @@
|
||||
"In this simple example we use `PromptLayerCallbackHandler` with `ChatOpenAI`. We add a PromptLayer tag named `chatopenai`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"execution": {
|
||||
"iopub.execute_input": "2024-03-06T19:10:56.673622Z",
|
||||
"iopub.status.busy": "2024-03-06T19:10:56.673421Z",
|
||||
"iopub.status.idle": "2024-03-06T19:10:56.887519Z",
|
||||
"shell.execute_reply": "2024-03-06T19:10:56.886895Z",
|
||||
"shell.execute_reply.started": "2024-03-06T19:10:56.673608Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import promptlayer # Don't forget this 🍰\n",
|
||||
"from langchain_community.callbacks.promptlayer_callback import (\n",
|
||||
" PromptLayerCallbackHandler,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import promptlayer # Don't forget this 🍰\n",
|
||||
"from langchain.callbacks import PromptLayerCallbackHandler\n",
|
||||
"from langchain.schema import (\n",
|
||||
" HumanMessage,\n",
|
||||
")\n",
|
||||
@@ -108,8 +126,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import promptlayer # Don't forget this 🍰\n",
|
||||
"from langchain.callbacks import PromptLayerCallbackHandler\n",
|
||||
"from langchain_community.llms import GPT4All\n",
|
||||
"\n",
|
||||
"model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n",
|
||||
@@ -140,8 +156,6 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import promptlayer # Don't forget this 🍰\n",
|
||||
"from langchain.callbacks import PromptLayerCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
||||
@@ -70,6 +70,16 @@
|
||||
"os.environ[\"SERPAPI_API_KEY\"] = \"<ADD-KEY-HERE>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e79dc1c0-b9dc-4652-9059-f3a8aa97b74a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -80,7 +90,6 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import initialize_agent, load_tools\n",
|
||||
"from langchain.callbacks import SageMakerCallbackHandler\n",
|
||||
"from langchain.chains import LLMChain, SimpleSequentialChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
|
||||
@@ -28,7 +28,9 @@ You can run `streamlit hello` to load a sample app and validate your install suc
|
||||
To create a `StreamlitCallbackHandler`, you just need to provide a parent container to render the output.
|
||||
|
||||
```python
|
||||
from langchain_community.callbacks import StreamlitCallbackHandler
|
||||
from langchain_community.callbacks.streamlit import (
|
||||
StreamlitCallbackHandler,
|
||||
)
|
||||
import streamlit as st
|
||||
|
||||
st_callback = StreamlitCallbackHandler(st.container())
|
||||
@@ -47,7 +49,6 @@ thoughts and actions live in your app.
|
||||
import streamlit as st
|
||||
from langchain import hub
|
||||
from langchain.agents import AgentExecutor, create_react_agent, load_tools
|
||||
from langchain_community.callbacks import StreamlitCallbackHandler
|
||||
from langchain_openai import OpenAI
|
||||
|
||||
llm = OpenAI(temperature=0, streaming=True)
|
||||
|
||||
@@ -65,6 +65,16 @@
|
||||
"os.environ[\"TRUBRICS_PASSWORD\"] = \"***\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "36fa67da-8a05-4d54-b0a3-dc173f3107a0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cd7177b0-a9e8-45ae-adb0-ea779376511b",
|
||||
@@ -148,7 +158,6 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import TrubricsCallbackHandler\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
@@ -266,8 +275,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import TrubricsCallbackHandler\n",
|
||||
"from langchain.schema import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
|
||||
141
docs/docs/integrations/chat/ai21.ipynb
Normal file
@@ -0,0 +1,141 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "4cebeec0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AI21 Labs\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatAI21\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with AI21 chat models.\n",
|
||||
"\n",
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4c3bef91",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-02-15T06:50:44.929635Z",
|
||||
"start_time": "2024-02-15T06:50:41.209704Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install -qU langchain-ai21"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Environment Setup\n",
|
||||
"\n",
|
||||
"We'll need to get a [AI21 API key](https://docs.ai21.com/) and set the `AI21_API_KEY` environment variable:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"AI21_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4828829d3da430ce",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "39353473fce5dd2e",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Bonjour, comment vas-tu?')"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_ai21 import ChatAI21\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"chat = ChatAI21(model=\"j2-ultra\")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a helpful assistant that translates English to French.\"),\n",
|
||||
" (\"human\", \"Translate this sentence from English to French. {english_text}.\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({\"english_text\": \"Hello, how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c159a79f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||