Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-10 03:00:59 +00:00)

Compare commits: langchain-...langchain= (50 commits)
| SHA1 |
|---|
| 9213d94057 |
| 9c682af8f3 |
| 08796802ca |
| b075eab3e0 |
| 372dc7f991 |
| e7883d5b9f |
| d075ad21a0 |
| f23c3e2444 |
| 86beb64b50 |
| 6f8735592b |
| 47d50f49d9 |
| 94a7fd2497 |
| 0d2cea747c |
| dd0faab07e |
| 21ab1dc675 |
| 22cee5d983 |
| a14d8b103b |
| 6d22f40a0b |
| 92189c8b31 |
| 1f0686db80 |
| e6b6c07395 |
| 1cf91a2386 |
| e181d43214 |
| 59908f04d4 |
| 05482877be |
| 63673b765b |
| 3aa080c2a8 |
| 14b7d790c1 |
| 0b2244ea88 |
| 80064893c1 |
| 956b09f468 |
| b28a474e79 |
| 92dc3f7341 |
| d0a9808148 |
| ed2428f902 |
| 75823d580b |
| 7664874a0d |
| d7d0bca2bc |
| 3781144710 |
| a9b1e1b177 |
| 8119a7bc5c |
| 56629ed87b |
| 913c8b71d9 |
| 7e3dea5db8 |
| d602141ab1 |
| dd9031fc82 |
| 3382b0d8ea |
| e90abce577 |
| c127ae9d26 |
| 65ecc22606 |
@@ -358,7 +358,7 @@
    "id": "6e5cd014-db86-4d6b-8399-25cae3da5570",
    "metadata": {},
    "source": [
-    "## Helper function to plot retrived similar images"
+    "## Helper function to plot retrieved similar images"
    ]
   },
   {
@@ -16,7 +16,7 @@
    "\n",
    "Tracking [token](/docs/concepts/tokens/) usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
    "\n",
-    "This guide requires `langchain-anthropic` and `langchain-openai >= 0.1.9`."
+    "This guide requires `langchain-anthropic` and `langchain-openai >= 0.3.11`."
    ]
   },
   {
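The guide's premise above is that token counts feed directly into cost accounting. A hedged sketch of that arithmetic, using placeholder per-token prices rather than any provider's published rates:

```python
# Placeholder USD rates -- assumptions for illustration, not real pricing.
PRICE_PER_INPUT_TOKEN = 0.15 / 1_000_000
PRICE_PER_OUTPUT_TOKEN = 0.60 / 1_000_000

def estimate_cost(usage_metadata: dict) -> float:
    """Estimate USD cost from a LangChain `usage_metadata` dict."""
    return (
        usage_metadata["input_tokens"] * PRICE_PER_INPUT_TOKEN
        + usage_metadata["output_tokens"] * PRICE_PER_OUTPUT_TOKEN
    )

# Token counts taken from an output shown later in this diff.
print(estimate_cost({"input_tokens": 8, "output_tokens": 10, "total_tokens": 18}))
```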
@@ -38,19 +38,9 @@
    "\n",
    "OpenAI's Chat Completions API does not stream token usage statistics by default (see API reference\n",
    "[here](https://platform.openai.com/docs/api-reference/completions/create#completions-create-stream_options)).\n",
-    "To recover token counts when streaming with `ChatOpenAI`, set `stream_usage=True` as\n",
+    "To recover token counts when streaming with `ChatOpenAI` or `AzureChatOpenAI`, set `stream_usage=True` as\n",
    "demonstrated in this guide.\n",
    "\n",
-    "For `AzureChatOpenAI`, set `stream_options={\"include_usage\": True}` when calling\n",
-    "`.(a)stream`, or initialize with:\n",
-    "\n",
-    "```python\n",
-    "AzureChatOpenAI(\n",
-    "    ...,\n",
-    "    model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
-    ")\n",
-    "```\n",
-    "\n",
    ":::"
    ]
   },
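A minimal sketch of the `stream_usage=True` behavior described in the hunk above, assuming an OpenAI API key is configured in the environment:

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)

aggregate = None
for chunk in llm.stream("hello"):
    # Message chunks support `+`, which merges content and usage_metadata.
    aggregate = chunk if aggregate is None else aggregate + chunk

print(aggregate.usage_metadata)
```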
@@ -67,7 +57,7 @@
    "\n",
    "A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model.\n",
    "\n",
-    "LangChain `AIMessage` objects include a [usage_metadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`).\n",
+    "LangChain `AIMessage` objects include a [usage_metadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`). They will also include information on cached token usage and tokens from multi-modal data.\n",
    "\n",
    "Examples:\n",
    "\n",
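The added sentence above mentions cached-token and multi-modal details. A short sketch of where those fields live, assuming an OpenAI key is set; the exact detail keys vary by provider:

```python
from langchain_openai import ChatOpenAI

response = ChatOpenAI(model="gpt-4o-mini").invoke("hello")
usage = response.usage_metadata

print(usage["input_tokens"], usage["output_tokens"], usage["total_tokens"])
# Optional detail fields, when the provider reports them:
print(usage.get("input_token_details"))   # e.g. {'audio': 0, 'cache_read': 0}
print(usage.get("output_token_details"))  # e.g. {'audio': 0, 'reasoning': 0}
```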
@@ -92,9 +82,9 @@
    }
   ],
   "source": [
-    "from langchain_openai import ChatOpenAI\n",
+    "from langchain.chat_models import init_chat_model\n",
    "\n",
-    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+    "llm = init_chat_model(model=\"gpt-4o-mini\")\n",
    "openai_response = llm.invoke(\"hello\")\n",
    "openai_response.usage_metadata"
   ]
@@ -132,37 +122,6 @@
    "anthropic_response.usage_metadata"
   ]
  },
- {
-  "cell_type": "markdown",
-  "id": "6d4efc15-ba9f-4b3d-9278-8e01f99f263f",
-  "metadata": {},
-  "source": [
-   "### Using AIMessage.response_metadata\n",
-   "\n",
-   "Metadata from the model response is also included in the AIMessage [response_metadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts:"
-  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": 3,
-  "id": "f156f9da-21f2-4c81-a714-54cbf9ad393e",
-  "metadata": {},
-  "outputs": [
-   {
-    "name": "stdout",
-    "output_type": "stream",
-    "text": [
-     "OpenAI: {'completion_tokens': 9, 'prompt_tokens': 8, 'total_tokens': 17}\n",
-     "\n",
-     "Anthropic: {'input_tokens': 8, 'output_tokens': 12}\n"
-    ]
-   }
-  ],
-  "source": [
-   "print(f'OpenAI: {openai_response.response_metadata[\"token_usage\"]}\\n')\n",
-   "print(f'Anthropic: {anthropic_response.response_metadata[\"usage\"]}')"
-  ]
- },
  {
   "cell_type": "markdown",
   "id": "b4ef2c43-0ff6-49eb-9782-e4070c9da8d7",
@@ -207,7 +166,7 @@
    }
   ],
   "source": [
-    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+    "llm = init_chat_model(model=\"gpt-4o-mini\")\n",
    "\n",
    "aggregate = None\n",
    "for chunk in llm.stream(\"hello\", stream_usage=True):\n",
@@ -318,7 +277,7 @@
    "    punchline: str = Field(description=\"answer to resolve the joke\")\n",
    "\n",
    "\n",
-    "llm = ChatOpenAI(\n",
+    "llm = init_chat_model(\n",
    "    model=\"gpt-4o-mini\",\n",
    "    stream_usage=True,\n",
    ")\n",
@@ -326,10 +285,10 @@
    "# chat model and appends a parser.\n",
    "structured_llm = llm.with_structured_output(Joke)\n",
    "\n",
-    "async for event in structured_llm.astream_events(\"Tell me a joke\", version=\"v2\"):\n",
+    "async for event in structured_llm.astream_events(\"Tell me a joke\"):\n",
    "    if event[\"event\"] == \"on_chat_model_end\":\n",
    "        print(f'Token usage: {event[\"data\"][\"output\"].usage_metadata}\\n')\n",
-    "    elif event[\"event\"] == \"on_chain_end\":\n",
+    "    elif event[\"event\"] == \"on_chain_end\" and event[\"name\"] == \"RunnableSequence\":\n",
    "        print(event[\"data\"][\"output\"])\n",
    "    else:\n",
    "        pass"
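Outside a notebook there is no top-level `await`, so the event loop has to be driven explicitly. A hedged wrapper around the pattern in the hunk above; `structured_llm` is assumed to be the chain built in that cell:

```python
import asyncio

async def last_usage(chain, prompt: str):
    """Return usage_metadata from the final chat-model call in the event stream."""
    usage = None
    async for event in chain.astream_events(prompt):
        if event["event"] == "on_chat_model_end":
            usage = event["data"]["output"].usage_metadata
    return usage

# usage = asyncio.run(last_usage(structured_llm, "Tell me a joke"))
```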
@@ -350,17 +309,18 @@
   "source": [
    "## Using callbacks\n",
    "\n",
-    "There are also some API-specific callback context managers that allow you to track token usage across multiple calls. They are currently only implemented for the OpenAI API and Bedrock Anthropic API, and are available in `langchain-community`:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "64e52d21",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%pip install -qU langchain-community"
+    ":::info Requires ``langchain-core>=0.3.49``\n",
+    "\n",
+    ":::\n",
+    "\n",
+    "LangChain implements a callback handler and context manager that will track token usage across calls of any chat model that returns `usage_metadata`.\n",
+    "\n",
+    "There are also some API-specific callback context managers that maintain pricing for different models, allowing for cost estimation in real time. They are currently only implemented for the OpenAI API and Bedrock Anthropic API, and are available in `langchain-community`:\n",
+    "\n",
+    "- [get_openai_callback](https://python.langchain.com/api_reference/community/callbacks/langchain_community.callbacks.manager.get_openai_callback.html)\n",
+    "- [get_bedrock_anthropic_callback](https://python.langchain.com/api_reference/community/callbacks/langchain_community.callbacks.manager.get_bedrock_anthropic_callback.html)\n",
+    "\n",
+    "Below, we demonstrate the general-purpose usage metadata callback manager. We can track token usage through configuration or as a context manager."
   ]
  },
  {
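A hedged sketch of the community cost-tracking manager linked above; it assumes `langchain-community` is installed and an OpenAI key is set:

```python
from langchain_community.callbacks.manager import get_openai_callback
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")

with get_openai_callback() as cb:
    llm.invoke("Tell me a joke")

# The handler maintains per-model pricing, so a cost estimate comes for free.
print(cb.total_tokens, cb.total_cost)
```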
@@ -368,41 +328,84 @@
   "id": "6f043cb9",
   "metadata": {},
   "source": [
-    "### OpenAI\n",
+    "### Tracking token usage through configuration\n",
    "\n",
-    "Let's first look at an extremely simple example of tracking token usage for a single Chat model call."
+    "To track token usage through configuration, instantiate a `UsageMetadataCallbackHandler` and pass it into the config:"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 9,
+  "execution_count": 17,
   "id": "b04a4486-72fd-48ce-8f9e-5d281b441195",
   "metadata": {},
   "outputs": [
    {
+    "data": {
+     "text/plain": [
+      "{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,\n",
+      "  'output_tokens': 10,\n",
+      "  'total_tokens': 18,\n",
+      "  'input_token_details': {'audio': 0, 'cache_read': 0},\n",
+      "  'output_token_details': {'audio': 0, 'reasoning': 0}},\n",
+      " 'claude-3-5-haiku-20241022': {'input_tokens': 8,\n",
+      "  'output_tokens': 21,\n",
+      "  'total_tokens': 29,\n",
+      "  'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}"
+     ]
+    },
+    "execution_count": 17,
+    "metadata": {},
+    "output_type": "execute_result"
+   }
+  ],
+  "source": [
+   "from langchain.chat_models import init_chat_model\n",
+   "from langchain_core.callbacks import UsageMetadataCallbackHandler\n",
+   "\n",
+   "llm_1 = init_chat_model(model=\"openai:gpt-4o-mini\")\n",
+   "llm_2 = init_chat_model(model=\"anthropic:claude-3-5-haiku-latest\")\n",
+   "\n",
+   "callback = UsageMetadataCallbackHandler()\n",
+   "result_1 = llm_1.invoke(\"Hello\", config={\"callbacks\": [callback]})\n",
+   "result_2 = llm_2.invoke(\"Hello\", config={\"callbacks\": [callback]})\n",
+   "callback.usage_metadata"
+  ]
+ },
+ {
+  "cell_type": "markdown",
+  "id": "7a290085-e541-4233-afe4-637ec5032bfd",
+  "metadata": {},
+  "source": [
+   "### Tracking token usage using a context manager\n",
+   "\n",
+   "You can also use `get_usage_metadata_callback` to create a context manager and aggregate usage metadata there:"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 19,
+  "id": "4728f55a-24e1-48cd-a195-09d037821b1e",
+  "metadata": {},
+  "outputs": [
+   {
    "name": "stdout",
    "output_type": "stream",
    "text": [
-     "Tokens Used: 27\n",
-     "\tPrompt Tokens: 11\n",
-     "\tCompletion Tokens: 16\n",
-     "Successful Requests: 1\n",
-     "Total Cost (USD): $2.95e-05\n"
+     "{'gpt-4o-mini-2024-07-18': {'input_tokens': 8, 'output_tokens': 10, 'total_tokens': 18, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}, 'claude-3-5-haiku-20241022': {'input_tokens': 8, 'output_tokens': 21, 'total_tokens': 29, 'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}\n"
    ]
   }
  ],
  "source": [
-   "from langchain_community.callbacks.manager import get_openai_callback\n",
+   "from langchain.chat_models import init_chat_model\n",
+   "from langchain_core.callbacks import get_usage_metadata_callback\n",
   "\n",
-   "llm = ChatOpenAI(\n",
-   "    model=\"gpt-4o-mini\",\n",
-   "    temperature=0,\n",
-   "    stream_usage=True,\n",
-   ")\n",
+   "llm_1 = init_chat_model(model=\"openai:gpt-4o-mini\")\n",
+   "llm_2 = init_chat_model(model=\"anthropic:claude-3-5-haiku-latest\")\n",
   "\n",
-   "with get_openai_callback() as cb:\n",
-   "    result = llm.invoke(\"Tell me a joke\")\n",
-   "    print(cb)"
+   "with get_usage_metadata_callback() as cb:\n",
+   "    llm_1.invoke(\"Hello\")\n",
+   "    llm_2.invoke(\"Hello\")\n",
+   "    print(cb.usage_metadata)"
  ]
 },
 {
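As a follow-on sketch (not part of the notebook), either handler's per-model dicts reduce to one grand total; `callback` is assumed to be the `UsageMetadataCallbackHandler` from the configuration example above:

```python
grand_total = sum(
    usage["total_tokens"] for usage in callback.usage_metadata.values()
)
print(grand_total)  # 18 (gpt-4o-mini) + 29 (claude-3-5-haiku) = 47
```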
@@ -410,61 +413,7 @@
   "id": "c0ab6d27",
   "metadata": {},
   "source": [
-    "Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "id": "05f22a1d-b021-490f-8840-f628a07459f2",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "54\n"
-     ]
-    }
-   ],
-   "source": [
-    "with get_openai_callback() as cb:\n",
-    "    result = llm.invoke(\"Tell me a joke\")\n",
-    "    result2 = llm.invoke(\"Tell me a joke\")\n",
-    "    print(cb.total_tokens)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "id": "c00c9158-7bb4-4279-88e6-ea70f46e6ac2",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Tokens Used: 27\n",
-      "\tPrompt Tokens: 11\n",
-      "\tCompletion Tokens: 16\n",
-      "Successful Requests: 1\n",
-      "Total Cost (USD): $2.95e-05\n"
-     ]
-    }
-   ],
-   "source": [
-    "with get_openai_callback() as cb:\n",
-    "    for chunk in llm.stream(\"Tell me a joke\"):\n",
-    "        pass\n",
-    "    print(cb)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "d8186e7b",
-   "metadata": {},
-   "source": [
-    "If a chain or agent with multiple steps in it is used, it will track all those steps."
+    "Either of these methods will aggregate token usage across multiple calls to each model. For example, you can use it in an [agent](https://python.langchain.com/docs/concepts/agents/) to track token usage across repeated calls to one model:"
   ]
  },
 {
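The next hunk swaps the old `AgentExecutor` example for a `langgraph` agent whose new source cell is truncated in this diff. A hedged reconstruction of how such an agent pairs with the callback, assuming a recent `langgraph` whose `create_react_agent` accepts an `init_chat_model`-style model string; the `get_weather` tool name and its canned reply are taken from the output shown below:

```python
from langchain_core.callbacks import UsageMetadataCallbackHandler
from langgraph.prebuilt import create_react_agent

def get_weather(location: str) -> str:
    """Return a canned weather report for `location`."""
    return "It's sunny."

callback = UsageMetadataCallbackHandler()
agent = create_react_agent("openai:gpt-4o-mini", [get_weather])

result = agent.invoke(
    {"messages": [{"role": "user", "content": "What's the weather in Boston?"}]},
    config={"callbacks": [callback]},
)
print(callback.usage_metadata)  # aggregated across the agent's repeated model calls
```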
@@ -474,138 +423,63 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "%pip install -qU langchain langchain-aws wikipedia"
+    "%pip install -qU langgraph"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 12,
-   "id": "5d1125c6",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from langchain.agents import AgentExecutor, create_tool_calling_agent, load_tools\n",
-    "from langchain_core.prompts import ChatPromptTemplate\n",
-    "\n",
-    "prompt = ChatPromptTemplate.from_messages(\n",
-    "    [\n",
-    "        (\"system\", \"You're a helpful assistant\"),\n",
-    "        (\"human\", \"{input}\"),\n",
-    "        (\"placeholder\", \"{agent_scratchpad}\"),\n",
-    "    ]\n",
-    ")\n",
-    "tools = load_tools([\"wikipedia\"])\n",
-    "agent = create_tool_calling_agent(llm, tools, prompt)\n",
-    "agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "id": "3950d88b-8bfb-4294-b75b-e6fd421e633c",
+   "execution_count": 20,
+   "id": "fe945078-ee2d-43ba-8cdf-afb2f2f4ecef",
   "metadata": {},
   "outputs": [
    {
    "name": "stdout",
    "output_type": "stream",
    "text": [
+     "================================\u001b[1m Human Message \u001b[0m=================================\n",
+     "\n",
+     "What's the weather in Boston?\n",
+     "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+     "Tool Calls:\n",
+     "  get_weather (call_izMdhUYpp9Vhx7DTNAiybzGa)\n",
+     " Call ID: call_izMdhUYpp9Vhx7DTNAiybzGa\n",
+     "  Args:\n",
+     "    location: Boston\n",
+     "=================================\u001b[1m Tool Message \u001b[0m=================================\n",
+     "Name: get_weather\n",
+     "\n",
-     "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
-     "\u001b[32;1m\u001b[1;3m\n",
-     "Invoking: `wikipedia` with `{'query': 'hummingbird scientific name'}`\n",
+     "It's sunny.\n",
+     "==================================\u001b[1m Ai Message \u001b[0m==================================\n",
+     "\n",
+     "The weather in Boston is sunny.\n",
+     "\n",
-     "\u001b[0m\u001b[36;1m\u001b[1;3mPage: Hummingbird\n",
-     "Summary: Hummingbirds are birds native to the Americas and comprise the biological family Trochilidae. With approximately 366 species and 113 genera, they occur from Alaska to Tierra del Fuego, but most species are found in Central and South America. As of 2024, 21 hummingbird species are listed as endangered or critically endangered, with numerous species declining in population.\n",
-     "Hummingbirds have varied specialized characteristics to enable rapid, maneuverable flight: exceptional metabolic capacity, adaptations to high altitude, sensitive visual and communication abilities, and long-distance migration in some species. Among all birds, male hummingbirds have the widest diversity of plumage color, particularly in blues, greens, and purples. Hummingbirds are the smallest mature birds, measuring 7.5–13 cm (3–5 in) in length. The smallest is the 5 cm (2.0 in) bee hummingbird, which weighs less than 2.0 g (0.07 oz), and the largest is the 23 cm (9 in) giant hummingbird, weighing 18–24 grams (0.63–0.85 oz). Noted for long beaks, hummingbirds are specialized for feeding on flower nectar, but all species also consume small insects.\n",
-     "They are known as hummingbirds because of the humming sound created by their beating wings, which flap at high frequencies audible to other birds and humans. They hover at rapid wing-flapping rates, which vary from around 12 beats per second in the largest species to 80 per second in small hummingbirds.\n",
-     "Hummingbirds have the highest mass-specific metabolic rate of any homeothermic animal. To conserve energy when food is scarce and at night when not foraging, they can enter torpor, a state similar to hibernation, and slow their metabolic rate to 1⁄15 of its normal rate. While most hummingbirds do not migrate, the rufous hummingbird has one of the longest migrations among birds, traveling twice per year between Alaska and Mexico, a distance of about 3,900 miles (6,300 km).\n",
-     "Hummingbirds split from their sister group, the swifts and treeswifts, around 42 million years ago. The oldest known fossil hummingbird is Eurotrochilus, from the Rupelian Stage of Early Oligocene Europe.\n",
-     "\n",
-     "Page: Rufous hummingbird\n",
-     "Summary: The rufous hummingbird (Selasphorus rufus) is a small hummingbird, about 8 cm (3.1 in) long with a long, straight and slender bill. These birds are known for their extraordinary flight skills, flying 2,000 mi (3,200 km) during their migratory transits. It is one of nine species in the genus Selasphorus.\n",
-     "\n",
-     "\n",
-     "\n",
-     "Page: Allen's hummingbird\n",
-     "Summary: Allen's hummingbird (Selasphorus sasin) is a species of hummingbird that breeds in the western United States. It is one of seven species in the genus Selasphorus.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
-     "Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
-     "\n",
-     "\n",
-     "\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
-     "Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
-     "\n",
-     "Page: Fastest animals\n",
-     "Summary: This is a list of the fastest animals in the world, by types of animal.\n",
-     "\n",
-     "Page: Falcon\n",
-     "Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
-     "Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
-     "The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
|
||||
"The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
|
||||
"Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
|
||||
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species in level flight is the common swift, which holds the record for the fastest confirmed level flight by a bird at 111.5 km/h (69.3 mph). The peregrine falcon is known to exceed speeds of 320 km/h (200 mph) in its dives, making it the fastest bird in terms of diving speed.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n",
|
||||
"Total Tokens: 1675\n",
|
||||
"Prompt Tokens: 1538\n",
|
||||
"Completion Tokens: 137\n",
|
||||
"Total Cost (USD): $0.0009745000000000001\n"
|
||||
"Total usage: {'gpt-4o-mini-2024-07-18': {'input_token_details': {'audio': 0, 'cache_read': 0}, 'input_tokens': 125, 'total_tokens': 149, 'output_tokens': 24, 'output_token_details': {'audio': 0, 'reasoning': 0}}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" response = agent_executor.invoke(\n",
|
||||
" {\n",
|
||||
" \"input\": \"What's a hummingbird's scientific name and what's the fastest bird species?\"\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
" print(f\"Total Tokens: {cb.total_tokens}\")\n",
|
||||
" print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
|
||||
" print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
|
||||
" print(f\"Total Cost (USD): ${cb.total_cost}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ebc9122b-050b-4006-b763-264b0b26d9df",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Bedrock Anthropic\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"The `get_bedrock_anthropic_callback` works very similarly:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1837c807-136a-49d8-9c33-060e58dc16d2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokens Used: 96\n",
|
||||
"\tPrompt Tokens: 26\n",
|
||||
"\tCompletion Tokens: 70\n",
|
||||
"Successful Requests: 2\n",
|
||||
"Total Cost (USD): $0.001888\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_aws import ChatBedrock\n",
|
||||
"from langchain_community.callbacks.manager import get_bedrock_anthropic_callback\n",
|
||||
"\n",
|
||||
"llm = ChatBedrock(model_id=\"anthropic.claude-v2\")\n",
|
||||
"# Create a tool\n",
|
||||
"def get_weather(location: str) -> str:\n",
|
||||
" \"\"\"Get the weather at a location.\"\"\"\n",
|
||||
" return \"It's sunny.\"\n",
|
||||
"\n",
|
||||
"with get_bedrock_anthropic_callback() as cb:\n",
|
||||
" result = llm.invoke(\"Tell me a joke\")\n",
|
||||
" result2 = llm.invoke(\"Tell me a joke\")\n",
|
||||
" print(cb)"
|
||||
"\n",
|
||||
"callback = UsageMetadataCallbackHandler()\n",
|
||||
"\n",
|
||||
"tools = [get_weather]\n",
|
||||
"agent = create_react_agent(\"openai:gpt-4o-mini\", tools)\n",
|
||||
"for step in agent.stream(\n",
|
||||
" {\"messages\": [{\"role\": \"user\", \"content\": \"What's the weather in Boston?\"}]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
" config={\"callbacks\": [callback]},\n",
|
||||
"):\n",
|
||||
" step[\"messages\"][-1].pretty_print()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(f\"\\nTotal usage: {callback.usage_metadata}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
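The `UsageMetadataCallbackHandler` introduced above also works outside of an agent. A minimal sketch against a bare chat model, assuming `langchain`, `langchain-openai`, and an `OPENAI_API_KEY` are available (`init_chat_model` infers the OpenAI provider from the model name):

```python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler

llm = init_chat_model("gpt-4o-mini")  # assumes OPENAI_API_KEY is set
callback = UsageMetadataCallbackHandler()

# pass the handler via config; usage accumulates across all calls it sees
llm.invoke("Tell me a joke", config={"callbacks": [callback]})
llm.invoke("Tell me another", config={"callbacks": [callback]})

print(callback.usage_metadata)  # per-model dict of input/output/total token counts
```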
@@ -127,7 +127,7 @@
|
||||
"id": "c89e2045-9244-43e6-bf3f-59af22658529",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that we've got a [model](/docs/concepts/chat_models/), [retriver](/docs/concepts/retrievers/) and [prompt](/docs/concepts/prompt_templates/), let's chain them all together. Following the how-to guide on [adding citations](/docs/how_to/qa_citations) to a RAG application, we'll make it so our chain returns both the answer and the retrieved Documents. This uses the same [LangGraph](/docs/concepts/architecture/#langgraph) implementation as in the [RAG Tutorial](/docs/tutorials/rag)."
|
||||
"Now that we've got a [model](/docs/concepts/chat_models/), [retriever](/docs/concepts/retrievers/) and [prompt](/docs/concepts/prompt_templates/), let's chain them all together. Following the how-to guide on [adding citations](/docs/how_to/qa_citations) to a RAG application, we'll make it so our chain returns both the answer and the retrieved Documents. This uses the same [LangGraph](/docs/concepts/architecture/#langgraph) implementation as in the [RAG Tutorial](/docs/tutorials/rag)."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -270,7 +270,7 @@
|
||||
"source": [
|
||||
"## Retrieval with query analysis\n",
|
||||
"\n",
|
||||
"So how would we include this in a chain? One thing that will make this a lot easier is if we call our retriever asyncronously - this will let us loop over the queries and not get blocked on the response time."
|
||||
"So how would we include this in a chain? One thing that will make this a lot easier is if we call our retriever asynchronously - this will let us loop over the queries and not get blocked on the response time."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
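For illustration, a minimal sketch of the asynchronous retrieval described above, assuming `retriever` is any LangChain retriever (retrievers are Runnables, so they expose `ainvoke`):

```python
import asyncio

async def retrieve_all(retriever, queries: list[str]):
    # fan the queries out concurrently instead of awaiting them one by one
    return await asyncio.gather(*(retriever.ainvoke(q) for q in queries))

# docs_per_query = asyncio.run(retrieve_all(retriever, ["query one", "query two"]))
```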
@@ -24,7 +24,7 @@
|
||||
"\n",
|
||||
"Note that the map step is typically parallelized over the input documents. This strategy is especially effective when understanding of a sub-document does not rely on preceeding context. For example, when summarizing a corpus of many, shorter documents.\n",
|
||||
"\n",
|
||||
"[LangGraph](https://langchain-ai.github.io/langgraph/), built on top of `langchain-core`, suports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows and is well-suited to this problem:\n",
|
||||
"[LangGraph](https://langchain-ai.github.io/langgraph/), built on top of `langchain-core`, supports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows and is well-suited to this problem:\n",
|
||||
"\n",
|
||||
"- LangGraph allows for individual steps (such as successive summarizations) to be streamed, allowing for greater control of execution;\n",
|
||||
"- LangGraph's [checkpointing](https://langchain-ai.github.io/langgraph/how-tos/persistence/) supports error recovery, extending with human-in-the-loop workflows, and easier incorporation into conversational applications.\n",
|
||||
|
||||
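For context, a minimal sketch of the map-reduce shape referenced above, using LangGraph's `Send` API to fan one summarization task out per document; the node names are illustrative and the LLM call is stubbed:

```python
import operator
from typing import Annotated, TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.types import Send  # on older langgraph versions: langgraph.constants

class State(TypedDict):
    docs: list[str]
    summaries: Annotated[list[str], operator.add]  # map results accumulate here

def fan_out(state: State):
    # "map": emit one summarize_doc task per document; LangGraph runs them in parallel
    return [Send("summarize_doc", {"doc": d}) for d in state["docs"]]

def summarize_doc(task: dict):
    # stub: replace with an LLM summarization call in a real pipeline
    return {"summaries": [task["doc"][:40]]}

graph = StateGraph(State)
graph.add_node("summarize_doc", summarize_doc)
graph.add_conditional_edges(START, fan_out, ["summarize_doc"])
graph.add_edge("summarize_doc", END)
app = graph.compile()
```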
@@ -1,318 +1,316 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "4cebeec0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AI21 Labs\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatAI21\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with AI21 chat models.\n",
|
||||
"Note that different chat models support different parameters. See the [AI21 documentation](https://docs.ai21.com/reference) to learn more about the parameters in your chosen model.\n",
|
||||
"[See all AI21's LangChain components.](https://pypi.org/project/langchain-ai21/) \n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatAI21](https://python.langchain.com/api_reference/ai21/chat_models/langchain_ai21.chat_models.ChatAI21.html#langchain_ai21.chat_models.ChatAI21) | [langchain-ai21](https://python.langchain.com/api_reference/ai21/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"We'll need to get an [AI21 API key](https://docs.ai21.com/) and set the `AI21_API_KEY` environment variable:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"if \"AI21_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"AI21_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6844fff-3702-4489-ab74-732f69f3b9d7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7c2e19d3-7c58-4470-9e1a-718b27a32056",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98e22f31-8acc-42d6-916d-415d1263c56e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f9699cd9-58f2-450e-aa64-799e66906c0f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"!pip install -qU langchain-ai21"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4828829d3da430ce",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "4cebeec0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AI21 Labs\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatAI21\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with AI21 chat models.\n",
|
||||
"Note that different chat models support different parameters. See the [AI21 documentation](https://docs.ai21.com/reference) to learn more about the parameters in your chosen model.\n",
|
||||
"[See all AI21's LangChain components.](https://pypi.org/project/langchain-ai21/)\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatAI21](https://python.langchain.com/api_reference/ai21/chat_models/langchain_ai21.chat_models.ChatAI21.html#langchain_ai21.chat_models.ChatAI21) | [langchain-ai21](https://python.langchain.com/api_reference/ai21/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Setup"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"We'll need to get an [AI21 API key](https://docs.ai21.com/) and set the `AI21_API_KEY` environment variable:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"if \"AI21_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"AI21_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6844fff-3702-4489-ab74-732f69f3b9d7",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7c2e19d3-7c58-4470-9e1a-718b27a32056",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98e22f31-8acc-42d6-916d-415d1263c56e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f9699cd9-58f2-450e-aa64-799e66906c0f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"!pip install -qU langchain-ai21"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4828829d3da430ce",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c40756fb-cbf8-4d44-a293-3989d707237e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import ChatAI21\n",
|
||||
"\n",
|
||||
"llm = ChatAI21(model=\"jamba-instruct\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bdc5d68-2a19-495e-8c04-d11adc86d3ae",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "46b982dc-5d8a-46da-a711-81c03ccd6adc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "10a30f84-b531-4fd5-8b5b-91512fbdc75b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "39353473fce5dd2e",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39c0ccd229927eab",
|
||||
"metadata": {},
|
||||
"source": "# Tool Calls / Function Calling"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2bf6b40be07fe2d4",
|
||||
"metadata": {},
|
||||
"source": "This example shows how to use tool calling with AI21 models:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a181a28df77120fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_ai21.chat_models import ChatAI21\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"from langchain_core.utils.function_calling import convert_to_openai_tool\n",
|
||||
"\n",
|
||||
"if \"AI21_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"AI21_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def get_weather(location: str, date: str) -> str:\n",
|
||||
" \"\"\"“Provide the weather for the specified location on the given date.”\"\"\"\n",
|
||||
" if location == \"New York\" and date == \"2024-12-05\":\n",
|
||||
" return \"25 celsius\"\n",
|
||||
" elif location == \"New York\" and date == \"2024-12-06\":\n",
|
||||
" return \"27 celsius\"\n",
|
||||
" elif location == \"London\" and date == \"2024-12-05\":\n",
|
||||
" return \"22 celsius\"\n",
|
||||
" return \"32 celsius\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatAI21(model=\"jamba-1.5-mini\")\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([convert_to_openai_tool(get_weather)])\n",
|
||||
"\n",
|
||||
"chat_messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant. You can use the provided tools \"\n",
|
||||
" \"to assist with various tasks and provide accurate information\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"human_messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"What is the forecast for the weather in New York on December 5, 2024?\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(content=\"And what about the 2024-12-06?\"),\n",
|
||||
" HumanMessage(content=\"OK, thank you.\"),\n",
|
||||
" HumanMessage(content=\"What is the expected weather in London on December 5, 2024?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"for human_message in human_messages:\n",
|
||||
" print(f\"User: {human_message.content}\")\n",
|
||||
" chat_messages.append(human_message)\n",
|
||||
" response = llm_with_tools.invoke(chat_messages)\n",
|
||||
" chat_messages.append(response)\n",
|
||||
" if response.tool_calls:\n",
|
||||
" tool_call = response.tool_calls[0]\n",
|
||||
" if tool_call[\"name\"] == \"get_weather\":\n",
|
||||
" weather = get_weather.invoke(\n",
|
||||
" {\n",
|
||||
" \"location\": tool_call[\"args\"][\"location\"],\n",
|
||||
" \"date\": tool_call[\"args\"][\"date\"],\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
" chat_messages.append(\n",
|
||||
" ToolMessage(content=weather, tool_call_id=tool_call[\"id\"])\n",
|
||||
" )\n",
|
||||
" llm_answer = llm_with_tools.invoke(chat_messages)\n",
|
||||
" print(f\"Assistant: {llm_answer.content}\")\n",
|
||||
" else:\n",
|
||||
" print(f\"Assistant: {response.content}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e79de691-9dd6-4697-b57e-59a4a3cc073a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatAI21 features and configurations head to the API reference: https://python.langchain.com/api_reference/ai21/chat_models/langchain_ai21.chat_models.ChatAI21.html"
|
||||
]
|
||||
}
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,349 +1,347 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Azure OpenAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"This guide will help you get started with AzureOpenAI [chat models](/docs/concepts/chat_models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html).\n",
|
||||
"\n",
|
||||
"Azure OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).\n",
|
||||
"\n",
|
||||
":::info Azure OpenAI vs OpenAI\n",
|
||||
"\n",
|
||||
"Azure OpenAI refers to OpenAI models hosted on the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). OpenAI also provides its own model APIs. To access OpenAI services directly, use the [ChatOpenAI integration](/docs/integrations/chat/openai/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/azure) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [AzureChatOpenAI](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html) | [langchain-openai](https://python.langchain.com/api_reference/openai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AzureOpenAI models you'll need to create an Azure account, create a deployment of an Azure OpenAI model, get the name and endpoint for your deployment, get an Azure OpenAI API key, and install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python) to create your deployment and generate an API key. Once you've done this set the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT environment variables:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"AZURE_OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your AzureOpenAI API key: \"\n",
|
||||
" )\n",
|
||||
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://YOUR-ENDPOINT.openai.azure.com/\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AzureOpenAI integration lives in the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions.\n",
|
||||
"- Replace `azure_deployment` with the name of your deployment,\n",
|
||||
"- You can find the latest supported `api_version` here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = AzureChatOpenAI(\n",
|
||||
" azure_deployment=\"gpt-35-turbo\", # or your deployment\n",
|
||||
" api_version=\"2023-06-01-preview\", # or your api version\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Azure OpenAI\n",
|
||||
"---"
|
||||
]
|
||||
},
{
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"This guide will help you get started with AzureOpenAI [chat models](/docs/concepts/chat_models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html).\n",
|
||||
"\n",
|
||||
"Azure OpenAI has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).\n",
|
||||
"\n",
|
||||
":::info Azure OpenAI vs OpenAI\n",
|
||||
"\n",
|
||||
"Azure OpenAI refers to OpenAI models hosted on the [Microsoft Azure platform](https://azure.microsoft.com/en-us/products/ai-services/openai-service). OpenAI also provides its own model APIs. To access OpenAI services directly, use the [ChatOpenAI integration](/docs/integrations/chat/openai/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/azure) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [AzureChatOpenAI](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html) | [langchain-openai](https://python.langchain.com/api_reference/openai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AzureOpenAI models you'll need to create an Azure account, create a deployment of an Azure OpenAI model, get the name and endpoint for your deployment, get an Azure OpenAI API key, and install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python) to create your deployment and generate an API key. Once you've done this set the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT environment variables:"
|
||||
]
|
||||
},
{
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"AZURE_OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your AzureOpenAI API key: \"\n",
|
||||
" )\n",
|
||||
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://YOUR-ENDPOINT.openai.azure.com/\""
|
||||
]
|
||||
},
|
||||
{
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AzureOpenAI integration lives in the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions.\n",
|
||||
"- Replace `azure_deployment` with the name of your deployment,\n",
|
||||
"- You can find the latest supported `api_version` here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import AzureChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = AzureChatOpenAI(\n",
|
||||
" azure_deployment=\"gpt-35-turbo\", # or your deployment\n",
|
||||
" api_version=\"2023-06-01-preview\", # or your api version\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" timeout=None,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 31, 'total_tokens': 39}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-bea4b46c-e3e1-4495-9d3a-698370ad963d-0', usage_metadata={'input_tokens': 31, 'output_tokens': 8, 'total_tokens': 39})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 26, 'total_tokens': 32}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-cbc44038-09d3-40d4-9da2-c5910ee636ca-0', usage_metadata={'input_tokens': 26, 'output_tokens': 6, 'total_tokens': 32})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Specifying model version\n",
|
||||
"\n",
|
||||
"Azure OpenAI responses contain `model_name` response metadata property, which is name of the model used to generate the response. However unlike native OpenAI responses, it does not contain the specific version of the model, which is set on the deployment in Azure. E.g. it does not distinguish between `gpt-35-turbo-0125` and `gpt-35-turbo-0301`. This makes it tricky to know which version of the model was used to generate the response, which as result can lead to e.g. wrong total cost calculation with `OpenAICallbackHandler`.\n",
|
||||
"\n",
|
||||
"To solve this problem, you can pass `model_version` parameter to `AzureChatOpenAI` class, which will be added to the model name in the llm output. This way you can easily distinguish between different versions of the model."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "04b36e75-e8b7-4721-899e-76301ac2ecd9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2ca02d23-60d0-43eb-8d04-070f61f8fefd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Total Cost (USD): $0.000063\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.callbacks import get_openai_callback\n",
|
||||
"\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" llm.invoke(messages)\n",
|
||||
" print(\n",
|
||||
" f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\"\n",
|
||||
" ) # without specifying the model version, flat-rate 0.002 USD per 1k input and output tokens is used"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e1b07ae2-3de7-44bd-bfdc-b76f4ba45a35",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Total Cost (USD): $0.000074\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_0301 = AzureChatOpenAI(\n",
|
||||
" azure_deployment=\"gpt-35-turbo\", # or your deployment\n",
|
||||
" api_version=\"2023-06-01-preview\", # or your api version\n",
|
||||
" model_version=\"0301\",\n",
|
||||
")\n",
|
||||
"with get_openai_callback() as cb:\n",
|
||||
" llm_0301.invoke(messages)\n",
|
||||
" print(f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all AzureChatOpenAI features and configurations head to the API reference: https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.azure.AzureChatOpenAI.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,380 +1,378 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AWS Bedrock\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatBedrock\n",
|
||||
"\n",
|
||||
"This doc will help you get started with AWS Bedrock [chat models](/docs/concepts/chat_models). Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with.\n",
|
||||
"\n",
|
||||
"For more information on which models are accessible via Bedrock, head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/models-features.html).\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatBedrock features and configurations head to the [API reference](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatBedrock](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `langchain-aws` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) to sign up to AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by following [these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)."
|
||||
]
|
||||
},
|
||||
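{
"cell_type": "markdown",
"id": "aws-credentials-env-note",
"metadata": {},
"source": [
"For example, if you are authenticating with an access key pair, one way to provide it is through boto3's standard environment variables (a minimal sketch; the region value is an assumption, so adjust it to yours):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aws-credentials-env-setup",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# standard boto3 credential variables; skip this cell if you use a shared\n",
"# credentials file (~/.aws/credentials) or an IAM role instead\n",
"if \"AWS_ACCESS_KEY_ID\" not in os.environ:\n",
"    os.environ[\"AWS_ACCESS_KEY_ID\"] = getpass.getpass(\"Enter your AWS access key ID: \")\n",
"if \"AWS_SECRET_ACCESS_KEY\" not in os.environ:\n",
"    os.environ[\"AWS_SECRET_ACCESS_KEY\"] = getpass.getpass(\"Enter your AWS secret access key: \")\n",
"os.environ.setdefault(\"AWS_DEFAULT_REGION\", \"us-east-1\")  # assumed region, change as needed"
]
},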
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Bedrock integration lives in the `langchain-aws` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-aws"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_aws import ChatBedrock\n",
|
||||
"\n",
|
||||
"llm = ChatBedrock(\n",
|
||||
" model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
|
||||
" model_kwargs=dict(temperature=0),\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-fdb07dc3-ff72-430d-b22b-e7824b15c766-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AWS Bedrock\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Voici la traduction en français :\n",
|
||||
"\n",
|
||||
"J'aime la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-5ad005ce-9f31-4670-baa0-9373d418698a-0', usage_metadata={'input_tokens': 23, 'output_tokens': 11, 'total_tokens': 34})"
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatBedrock\n",
|
||||
"\n",
|
||||
"This doc will help you get started with AWS Bedrock [chat models](/docs/concepts/chat_models). Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with.\n",
|
||||
"\n",
|
||||
"For more information on which models are accessible via Bedrock, head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/models-features.html).\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatBedrock features and configurations head to the [API reference](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatBedrock](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `langchain-aws` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) to sign up to AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by following [these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)."
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Bedrock Converse API\n",
|
||||
"\n",
|
||||
"AWS has recently released the Bedrock Converse API which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then a separate [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) integration has been released.\n",
|
||||
"\n",
|
||||
"We recommend using `ChatBedrockConverse` for users who do not need to use custom models.\n",
|
||||
"\n",
|
||||
"You can use it like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "ae728e59-94d4-40cf-9d24-25ad8723fc59",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'ResponseMetadata': {'RequestId': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 21 Aug 2024 17:23:49 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 672}}, id='run-77ee9810-e32b-45dc-9ccb-6692253b1f45-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_aws import ChatBedrockConverse\n",
|
||||
"\n",
|
||||
"llm = ChatBedrockConverse(\n",
|
||||
" model=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm.invoke(messages)"
|
||||
]
|
||||
},
|
||||
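{
"cell_type": "markdown",
"id": "converse-tool-calling-note",
"metadata": {},
"source": [
"Tool calling (marked as supported in the feature table above) follows the standard LangChain `bind_tools` interface. A minimal sketch, reusing the `llm` defined above; `GetWeather` is a made-up example schema:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "converse-tool-calling-demo",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
"    \"\"\"Get the current weather in a given city.\"\"\"\n",
"\n",
"    city: str = Field(description=\"The city to look up\")\n",
"\n",
"\n",
"llm_with_tools = llm.bind_tools([GetWeather])\n",
"ai_msg = llm_with_tools.invoke(\"What's the weather in Paris?\")\n",
"# the model responds with a structured tool call rather than plain text\n",
"print(ai_msg.tool_calls)"
]
},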
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4da16f3e-e80b-48c0-8036-c1cc5f7c8c05",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"Note that `ChatBedrockConverse` emits content blocks while streaming:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7794b32e-d8de-4973-bf0f-39807dc745f0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=[] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'Vo', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'ici', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' tra', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'duction', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' en', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' français', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' :', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': '\\n\\nJ', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': \"'\", 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'a', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'ime', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' programm', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'ation', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': '.', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[] response_metadata={'stopReason': 'end_turn'} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[] response_metadata={'metrics': {'latencyMs': 713}} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8' usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in llm.stream(messages):\n",
|
||||
" print(chunk)"
|
||||
]
|
||||
},
|
||||
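{
"cell_type": "markdown",
"id": "stream-aggregation-note",
"metadata": {},
"source": [
"Message chunks support addition, so the stream can also be folded back into a single message (a minimal sketch):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "stream-aggregation-demo",
"metadata": {},
"outputs": [],
"source": [
"full = None\n",
"for chunk in llm.stream(messages):\n",
"    # summing chunks merges their content blocks and metadata\n",
"    full = chunk if full is None else full + chunk\n",
"print(full.content)"
]
},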
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ef05abb-9c04-4dc3-995e-f857779644d5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"An output parser can be used to filter to text, if desired:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2a4e743f-ea7d-4e5a-9b12-f9992362de8b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Bedrock integration lives in the `langchain-aws` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"|Vo|ici| la| tra|duction| en| français| :|\n",
|
||||
"\n",
|
||||
"J|'|a|ime| la| programm|ation|.||||"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-aws"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_aws import ChatBedrock\n",
|
||||
"\n",
|
||||
"llm = ChatBedrock(\n",
|
||||
" model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
|
||||
" model_kwargs=dict(temperature=0),\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-fdb07dc3-ff72-430d-b22b-e7824b15c766-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Voici la traduction en français :\n",
|
||||
"\n",
|
||||
"J'aime la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-5ad005ce-9f31-4670-baa0-9373d418698a-0', usage_metadata={'input_tokens': 23, 'output_tokens': 11, 'total_tokens': 34})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Bedrock Converse API\n",
|
||||
"\n",
|
||||
"AWS has recently released the Bedrock Converse API which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then a separate [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) integration has been released.\n",
|
||||
"\n",
|
||||
"We recommend using `ChatBedrockConverse` for users who do not need to use custom models.\n",
|
||||
"\n",
|
||||
"You can use it like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "ae728e59-94d4-40cf-9d24-25ad8723fc59",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'ResponseMetadata': {'RequestId': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 21 Aug 2024 17:23:49 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 672}}, id='run-77ee9810-e32b-45dc-9ccb-6692253b1f45-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_aws import ChatBedrockConverse\n",
|
||||
"\n",
|
||||
"llm = ChatBedrockConverse(\n",
|
||||
" model=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=None,\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"llm.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4da16f3e-e80b-48c0-8036-c1cc5f7c8c05",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"Note that `ChatBedrockConverse` emits content blocks while streaming:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "7794b32e-d8de-4973-bf0f-39807dc745f0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=[] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'Vo', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'ici', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' tra', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'duction', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' en', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' français', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' :', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': '\\n\\nJ', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': \"'\", 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'a', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'ime', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': ' programm', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': 'ation', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'type': 'text', 'text': '.', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[{'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[] response_metadata={'stopReason': 'end_turn'} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
|
||||
"content=[] response_metadata={'metrics': {'latencyMs': 713}} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8' usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in llm.stream(messages):\n",
|
||||
" print(chunk)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ef05abb-9c04-4dc3-995e-f857779644d5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"An output parser can be used to filter to text, if desired:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2a4e743f-ea7d-4e5a-9b12-f9992362de8b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"|Vo|ici| la| tra|duction| en| français| :|\n",
|
||||
"\n",
|
||||
"J|'|a|ime| la| programm|ation|.||||"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"\n",
|
||||
"chain = llm | StrOutputParser()\n",
|
||||
"\n",
|
||||
"for chunk in chain.stream(messages):\n",
|
||||
" print(chunk, end=\"|\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatBedrock features and configurations head to the API reference: https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatBedrockConverse features and configurations head to the API reference: https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,424 +1,422 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Cerebras\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatCerebras\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with Cerebras [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatCerebras features and configurations head to the [API reference](https://python.langchain.com/api_reference/cerebras/chat_models/langchain_cerebras.chat_models.ChatCerebras.html#).\n",
|
||||
"\n",
|
||||
"At Cerebras, we've developed the world's largest and fastest AI processor, the Wafer-Scale Engine-3 (WSE-3). The Cerebras CS-3 system, powered by the WSE-3, represents a new class of AI supercomputer that sets the standard for generative AI training and inference with unparalleled performance and scalability.\n",
|
||||
"\n",
|
||||
"With Cerebras as your inference provider, you can:\n",
|
||||
"- Achieve unprecedented speed for AI inference workloads\n",
|
||||
"- Build commercially with high throughput\n",
|
||||
"- Effortlessly scale your AI workloads with our seamless clustering technology\n",
|
||||
"\n",
|
||||
"Our CS-3 systems can be quickly and easily clustered to create the largest AI supercomputers in the world, making it simple to place and run the largest models. Leading corporations, research institutions, and governments are already using Cerebras solutions to develop proprietary models and train popular open-source models.\n",
|
||||
"\n",
|
||||
"Want to experience the power of Cerebras? Check out our [website](https://cerebras.ai) for more resources and explore options for accessing our technology through the Cerebras Cloud or on-premise deployments!\n",
|
||||
"\n",
|
||||
"For more information about Cerebras Cloud, visit [cloud.cerebras.ai](https://cloud.cerebras.ai/). Our API reference is available at [inference-docs.cerebras.ai](https://inference-docs.cerebras.ai/).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/cerebras) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatCerebras](https://python.langchain.com/api_reference/cerebras/chat_models/langchain_cerebras.chat_models.ChatCerebras.html#) | [langchain-cerebras](https://python.langchain.com/api_reference/cerebras/index.html) | ❌ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install langchain-cerebras\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Get an API Key from [cloud.cerebras.ai](https://cloud.cerebras.ai/) and add it to your environment variables:\n",
|
||||
"```\n",
|
||||
"export CEREBRAS_API_KEY=\"your-api-key-here\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ce19c2d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Enter your Cerebras API key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"CEREBRAS_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"CEREBRAS_API_KEY\"] = getpass.getpass(\"Enter your Cerebras API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Cerebras integration lives in the `langchain-cerebras` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-cerebras"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea69675d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "21155898",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Je adore le programmation.', response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 35, 'total_tokens': 42}, 'model_name': 'llama3-8b-8192', 'system_fingerprint': 'fp_be27ec77ff', 'finish_reason': 'stop'}, id='run-e5d66faf-019c-4ac6-9265-71093b13202d-0', usage_metadata={'input_tokens': 35, 'output_tokens': 7, 'total_tokens': 42})"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Cerebras\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren!\\n\\n(Literally: I love programming!)', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 30, 'total_tokens': 44}, 'model_name': 'llama3-8b-8192', 'system_fingerprint': 'fp_be27ec77ff', 'finish_reason': 'stop'}, id='run-e1d2ebb8-76d1-471b-9368-3b68d431f16a-0', usage_metadata={'input_tokens': 30, 'output_tokens': 14, 'total_tokens': 44})"
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatCerebras\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with Cerebras [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatCerebras features and configurations head to the [API reference](https://python.langchain.com/api_reference/cerebras/chat_models/langchain_cerebras.chat_models.ChatCerebras.html#).\n",
|
||||
"\n",
|
||||
"At Cerebras, we've developed the world's largest and fastest AI processor, the Wafer-Scale Engine-3 (WSE-3). The Cerebras CS-3 system, powered by the WSE-3, represents a new class of AI supercomputer that sets the standard for generative AI training and inference with unparalleled performance and scalability.\n",
|
||||
"\n",
|
||||
"With Cerebras as your inference provider, you can:\n",
|
||||
"- Achieve unprecedented speed for AI inference workloads\n",
|
||||
"- Build commercially with high throughput\n",
|
||||
"- Effortlessly scale your AI workloads with our seamless clustering technology\n",
|
||||
"\n",
|
||||
"Our CS-3 systems can be quickly and easily clustered to create the largest AI supercomputers in the world, making it simple to place and run the largest models. Leading corporations, research institutions, and governments are already using Cerebras solutions to develop proprietary models and train popular open-source models.\n",
|
||||
"\n",
|
||||
"Want to experience the power of Cerebras? Check out our [website](https://cerebras.ai) for more resources and explore options for accessing our technology through the Cerebras Cloud or on-premise deployments!\n",
|
||||
"\n",
|
||||
"For more information about Cerebras Cloud, visit [cloud.cerebras.ai](https://cloud.cerebras.ai/). Our API reference is available at [inference-docs.cerebras.ai](https://inference-docs.cerebras.ai/).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/cerebras) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatCerebras](https://python.langchain.com/api_reference/cerebras/chat_models/langchain_cerebras.chat_models.ChatCerebras.html#) | [langchain-cerebras](https://python.langchain.com/api_reference/cerebras/index.html) | ❌ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install langchain-cerebras\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Get an API Key from [cloud.cerebras.ai](https://cloud.cerebras.ai/) and add it to your environment variables:\n",
|
||||
"```\n",
|
||||
"export CEREBRAS_API_KEY=\"your-api-key-here\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ec73a0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "46fd21a7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OH BOY! Let me tell you all about LIONS!\n",
|
||||
"\n",
|
||||
"Lions are the kings of the jungle! They're really big and have beautiful, fluffy manes around their necks. The mane is like a big, golden crown!\n",
|
||||
"\n",
|
||||
"Lions live in groups called prides. A pride is like a big family, and the lionesses (that's what we call the female lions) take care of the babies. The lionesses are like the mommies, and they teach the babies how to hunt and play.\n",
|
||||
"\n",
|
||||
"Lions are very good at hunting. They work together to catch their food, like zebras and antelopes. They're super fast and can run really, really fast!\n",
|
||||
"\n",
|
||||
"But lions are also very sleepy. They like to take long naps in the sun, and they can sleep for up to 20 hours a day! Can you imagine sleeping that much?\n",
|
||||
"\n",
|
||||
"Lions are also very loud. They roar really loudly to talk to each other. It's like they're saying, \"ROAR! I'm the king of the jungle!\"\n",
|
||||
"\n",
|
||||
"And guess what? Lions are very social. They like to play and cuddle with each other. They're like big, furry teddy bears!\n",
|
||||
"\n",
|
||||
"So, that's lions! Aren't they just the coolest?"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"system = \"You are an expert on animals who must answer questions in a manner that a 5 year old can understand.\"\n",
|
||||
"human = \"I want to learn more about this animal: {animal}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"\n",
|
||||
"for chunk in chain.stream({\"animal\": \"Lion\"}):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f67b6132",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Async"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "a3a45baf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ice', response_metadata={'token_usage': {'completion_tokens': 2, 'prompt_tokens': 36, 'total_tokens': 38}, 'model_name': 'llama3-8b-8192', 'system_fingerprint': 'fp_be27ec77ff', 'finish_reason': 'stop'}, id='run-7434bdde-1bec-44cf-827b-8d978071dfe8-0', usage_metadata={'input_tokens': 36, 'output_tokens': 2, 'total_tokens': 38})"
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ce19c2d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Enter your Cerebras API key: ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"CEREBRAS_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"CEREBRAS_API_KEY\"] = getpass.getpass(\"Enter your Cerebras API key: \")"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"Let's play a game of opposites. What's the opposite of {topic}? Just give me the answer with no extra input.\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"chain = prompt | llm\n",
|
||||
"await chain.ainvoke({\"topic\": \"fire\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4f9d9945",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Async Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "c7448e0f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"In the distant reaches of the cosmos, there existed a peculiar phenomenon known as the \"Eclipse of Eternity,\" a swirling vortex of darkness that had been shrouded in mystery for eons. It was said that this blackhole, born from the cataclysmic collision of two ancient stars, had been slowly devouring the fabric of space-time itself, warping the very essence of reality. As the celestial bodies of the galaxy danced around it, they began to notice a strange, almost imperceptible distortion in the fabric of space, as if the blackhole's gravitational pull was exerting an influence on the very course of events itself.\n",
|
||||
"\n",
|
||||
"As the centuries passed, astronomers from across the galaxy became increasingly fascinated by the Eclipse of Eternity, pouring over ancient texts and scouring the cosmos for any hint of its secrets. One such scholar, a brilliant and reclusive astrophysicist named Dr. Elara Vex, became obsessed with unraveling the mysteries of the blackhole. She spent years pouring over ancient texts, deciphering cryptic messages and hidden codes that hinted at the existence of a long-lost civilization that had once thrived in the heart of the blackhole itself. According to legend, this ancient civilization had possessed knowledge of the cosmos that was beyond human comprehension, and had used their mastery of the universe to create the Eclipse of Eternity as a gateway to other dimensions.\n",
|
||||
"\n",
|
||||
"As Dr. Vex delved deeper into her research, she began to experience strange and vivid dreams, visions that seemed to transport her to the very heart of the blackhole itself. In these dreams, she saw ancient beings, their faces twisted in agony as they were consumed by the void. She saw stars and galaxies, their light warped and distorted by the blackhole's gravitational pull. And she saw the Eclipse of Eternity itself, its swirling vortex of darkness pulsing with an otherworldly energy that seemed to be calling to her. As the dreams grew more vivid and more frequent, Dr. Vex became convinced that she was being drawn into the heart of the blackhole, and that the secrets of the universe lay waiting for her on the other side."
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Cerebras integration lives in the `langchain-cerebras` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-cerebras"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea69675d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "21155898",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Je adore le programmation.', response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 35, 'total_tokens': 42}, 'model_name': 'llama3-8b-8192', 'system_fingerprint': 'fp_be27ec77ff', 'finish_reason': 'stop'}, id='run-e5d66faf-019c-4ac6-9265-71093b13202d-0', usage_metadata={'input_tokens': 35, 'output_tokens': 7, 'total_tokens': 42})"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren!\\n\\n(Literally: I love programming!)', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 30, 'total_tokens': 44}, 'model_name': 'llama3-8b-8192', 'system_fingerprint': 'fp_be27ec77ff', 'finish_reason': 'stop'}, id='run-e1d2ebb8-76d1-471b-9368-3b68d431f16a-0', usage_metadata={'input_tokens': 30, 'output_tokens': 14, 'total_tokens': 44})"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ec73a0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "46fd21a7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OH BOY! Let me tell you all about LIONS!\n",
|
||||
"\n",
|
||||
"Lions are the kings of the jungle! They're really big and have beautiful, fluffy manes around their necks. The mane is like a big, golden crown!\n",
|
||||
"\n",
|
||||
"Lions live in groups called prides. A pride is like a big family, and the lionesses (that's what we call the female lions) take care of the babies. The lionesses are like the mommies, and they teach the babies how to hunt and play.\n",
|
||||
"\n",
|
||||
"Lions are very good at hunting. They work together to catch their food, like zebras and antelopes. They're super fast and can run really, really fast!\n",
|
||||
"\n",
|
||||
"But lions are also very sleepy. They like to take long naps in the sun, and they can sleep for up to 20 hours a day! Can you imagine sleeping that much?\n",
|
||||
"\n",
|
||||
"Lions are also very loud. They roar really loudly to talk to each other. It's like they're saying, \"ROAR! I'm the king of the jungle!\"\n",
|
||||
"\n",
|
||||
"And guess what? Lions are very social. They like to play and cuddle with each other. They're like big, furry teddy bears!\n",
|
||||
"\n",
|
||||
"So, that's lions! Aren't they just the coolest?"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"system = \"You are an expert on animals who must answer questions in a manner that a 5 year old can understand.\"\n",
|
||||
"human = \"I want to learn more about this animal: {animal}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"\n",
|
||||
"for chunk in chain.stream({\"animal\": \"Lion\"}):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f67b6132",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Async"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "a3a45baf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ice', response_metadata={'token_usage': {'completion_tokens': 2, 'prompt_tokens': 36, 'total_tokens': 38}, 'model_name': 'llama3-8b-8192', 'system_fingerprint': 'fp_be27ec77ff', 'finish_reason': 'stop'}, id='run-7434bdde-1bec-44cf-827b-8d978071dfe8-0', usage_metadata={'input_tokens': 36, 'output_tokens': 2, 'total_tokens': 38})"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"Let's play a game of opposites. What's the opposite of {topic}? Just give me the answer with no extra input.\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"chain = prompt | llm\n",
|
||||
"await chain.ainvoke({\"topic\": \"fire\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4f9d9945",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Async Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "c7448e0f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"In the distant reaches of the cosmos, there existed a peculiar phenomenon known as the \"Eclipse of Eternity,\" a swirling vortex of darkness that had been shrouded in mystery for eons. It was said that this blackhole, born from the cataclysmic collision of two ancient stars, had been slowly devouring the fabric of space-time itself, warping the very essence of reality. As the celestial bodies of the galaxy danced around it, they began to notice a strange, almost imperceptible distortion in the fabric of space, as if the blackhole's gravitational pull was exerting an influence on the very course of events itself.\n",
|
||||
"\n",
|
||||
"As the centuries passed, astronomers from across the galaxy became increasingly fascinated by the Eclipse of Eternity, pouring over ancient texts and scouring the cosmos for any hint of its secrets. One such scholar, a brilliant and reclusive astrophysicist named Dr. Elara Vex, became obsessed with unraveling the mysteries of the blackhole. She spent years pouring over ancient texts, deciphering cryptic messages and hidden codes that hinted at the existence of a long-lost civilization that had once thrived in the heart of the blackhole itself. According to legend, this ancient civilization had possessed knowledge of the cosmos that was beyond human comprehension, and had used their mastery of the universe to create the Eclipse of Eternity as a gateway to other dimensions.\n",
|
||||
"\n",
|
||||
"As Dr. Vex delved deeper into her research, she began to experience strange and vivid dreams, visions that seemed to transport her to the very heart of the blackhole itself. In these dreams, she saw ancient beings, their faces twisted in agony as they were consumed by the void. She saw stars and galaxies, their light warped and distorted by the blackhole's gravitational pull. And she saw the Eclipse of Eternity itself, its swirling vortex of darkness pulsing with an otherworldly energy that seemed to be calling to her. As the dreams grew more vivid and more frequent, Dr. Vex became convinced that she was being drawn into the heart of the blackhole, and that the secrets of the universe lay waiting for her on the other side."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_cerebras import ChatCerebras\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"llm = ChatCerebras(\n",
|
||||
" model=\"llama-3.3-70b\",\n",
|
||||
" # other params...\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"Write a long convoluted story about {subject}. I want {num_paragraphs} paragraphs.\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"chain = prompt | llm\n",
|
||||
"\n",
|
||||
"async for chunk in chain.astream({\"num_paragraphs\": 3, \"subject\": \"blackholes\"}):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
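{
"cell_type": "markdown",
"id": "async-batch-note",
"metadata": {},
"source": [
"As a related sketch (not part of the original notebook, assuming only the standard LangChain Runnable interface), the same chain can also be awaited over several inputs concurrently with `abatch`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "async-batch-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch: `abatch` is part of the generic Runnable interface, so it\n",
"# should work with the `chain` built above; the inputs are illustrative only.\n",
"results = await chain.abatch(\n",
"    [\n",
"        {\"num_paragraphs\": 1, \"subject\": \"neutron stars\"},\n",
"        {\"num_paragraphs\": 1, \"subject\": \"quasars\"},\n",
"    ]\n",
")\n",
"for res in results:\n",
"    print(res.content[:100])"
]
},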
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatCerebras features and configurations, head to the API reference: https://python.langchain.com/api_reference/cerebras/chat_models/langchain_cerebras.chat_models.ChatCerebras.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,264 +1,262 @@
{
"cells": [
{
"cell_type": "raw",
"id": "30373ae2-f326-4e96-a1f7-062f57396886",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Cloudflare Workers AI\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "f679592d",
"metadata": {},
"source": [
"# ChatCloudflareWorkersAI\n",
"\n",
"This will help you get started with CloudflareWorkersAI [chat models](/docs/concepts/chat_models). For detailed documentation of all available Cloudflare WorkersAI models, head to the [API reference](https://developers.cloudflare.com/workers-ai/).\n",
"\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/cloudflare_workersai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| ChatCloudflareWorkersAI | langchain-community | ❌ | ❌ | ✅ | ❌ | ❌ |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access Cloudflare Workers AI models you'll need to create a Cloudflare account, get an account ID and API token, and install the `langchain-community` package.\n",
"\n",
"\n",
"### Credentials\n",
"\n",
"\n",
"Head to [this document](https://developers.cloudflare.com/workers-ai/get-started/rest-api/) to sign up for Cloudflare Workers AI and generate an API key."
]
},
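{
"cell_type": "markdown",
"id": "credentials-sketch-note",
"metadata": {},
"source": [
"As a minimal sketch (not from the original notebook): rather than hard-coding credentials, you can prompt for them and keep them in environment variables. The variable names `CF_ACCOUNT_ID` and `CF_API_TOKEN` are illustrative, not official:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "credentials-sketch",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# Hypothetical variable names -- any names work, as long as you pass the\n",
"# values through to ChatCloudflareWorkersAI below.\n",
"if \"CF_ACCOUNT_ID\" not in os.environ:\n",
"    os.environ[\"CF_ACCOUNT_ID\"] = getpass.getpass(\"Enter your Cloudflare account ID: \")\n",
"if \"CF_API_TOKEN\" not in os.environ:\n",
"    os.environ[\"CF_API_TOKEN\"] = getpass.getpass(\"Enter your Cloudflare API token: \")"
]
},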
{
"cell_type": "markdown",
"id": "4a524cff",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": 3,
"id": "71b53c25",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "777a8526",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain ChatCloudflareWorkersAI integration lives in the `langchain-community` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "54990998",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-community"
]
},
{
"cell_type": "markdown",
"id": "629ba46f",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ec13c2d9",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models.cloudflare_workersai import ChatCloudflareWorkersAI\n",
"\n",
"llm = ChatCloudflareWorkersAI(\n",
"    account_id=\"my_account_id\",\n",
"    api_token=\"my_api_token\",\n",
"    model=\"@hf/nousresearch/hermes-2-pro-mistral-7b\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "119b6732",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "2438a906",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-11-07 15:55:14 - INFO - Sending prompt to Cloudflare Workers AI: {'prompt': 'role: system, content: You are a helpful assistant that translates English to French. Translate the user sentence.\\nrole: user, content: I love programming.', 'tools': None}\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='{\\'result\\': {\\'response\\': \\'Je suis un assistant virtuel qui peut traduire l\\\\\\'anglais vers le français. La phrase que vous avez dite est : \"J\\\\\\'aime programmer.\" En français, cela se traduit par : \"J\\\\\\'adore programmer.\"\\'}, \\'success\\': True, \\'errors\\': [], \\'messages\\': []}', additional_kwargs={}, response_metadata={}, id='run-838fd398-8594-4ca5-9055-03c72993caf6-0')"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "1b4911bd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'result': {'response': 'Je suis un assistant virtuel qui peut traduire l\\'anglais vers le français. La phrase que vous avez dite est : \"J\\'aime programmer.\" En français, cela se traduit par : \"J\\'adore programmer.\"'}, 'success': True, 'errors': [], 'messages': []}\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
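{
"cell_type": "markdown",
"id": "parse-response-note",
"metadata": {},
"source": [
"A minimal sketch (not from the original notebook): the raw `content` above is a Python-literal style dict string, so it can be parsed with `ast.literal_eval` to extract just the response text:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "parse-response-sketch",
"metadata": {},
"outputs": [],
"source": [
"import ast\n",
"\n",
"# Assumes `ai_msg` from the invocation above; the payload shape\n",
"# ({'result': {'response': ...}}) is taken from the printed output.\n",
"payload = ast.literal_eval(ai_msg.content)\n",
"print(payload[\"result\"][\"response\"])"
]
},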
{
"cell_type": "markdown",
"id": "111aa5d4",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "b2a14282",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-11-07 15:55:24 - INFO - Sending prompt to Cloudflare Workers AI: {'prompt': 'role: system, content: You are a helpful assistant that translates English to German.\\nrole: user, content: I love programming.', 'tools': None}\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\"{'result': {'response': 'role: system, content: Das ist sehr nett zu hören! Programmieren lieben, ist eine interessante und anspruchsvolle Hobby- oder Berufsausrichtung. Wenn Sie englische Texte ins Deutsche übersetzen möchten, kann ich Ihnen helfen. Geben Sie bitte den englischen Satz oder die Übersetzung an, die Sie benötigen.'}, 'success': True, 'errors': [], 'messages': []}\", additional_kwargs={}, response_metadata={}, id='run-0d3be9a6-3d74-4dde-b49a-4479d6af00ef-0')"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e1f311bd",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation on `ChatCloudflareWorkersAI` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.cloudflare_workersai.html)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,352 +1,350 @@
{
"cells": [
{
"cell_type": "raw",
"id": "53fbf15f",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Cohere\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "bf733a38-db84-4363-89e2-de6735c37230",
"metadata": {},
"source": [
"# Cohere\n",
"\n",
"This notebook covers how to get started with [Cohere chat models](https://cohere.com/chat).\n",
"\n",
"Head to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.cohere.ChatCohere.html) for detailed documentation of all attributes and methods."
]
},
{
"cell_type": "markdown",
"id": "3607d67e-e56c-4102-bbba-df2edc0e109e",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"The integration lives in the `langchain-cohere` package. We can install it with:\n",
"\n",
"```bash\n",
"pip install -U langchain-cohere\n",
"```\n",
"\n",
"We'll also need to get a [Cohere API key](https://cohere.com/) and set the `COHERE_API_KEY` environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "2108b517-1e8d-473d-92fa-4f930e8072a7",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"COHERE_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "cf690fbb",
"metadata": {},
"source": [
"It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3c2fc2201dc80557",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "31f2af10e04dec59",
"metadata": {},
"source": [
"## Usage\n",
"\n",
"ChatCohere supports all [ChatModel](/docs/how_to#chat-models) functionality:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fa83b00a929614ad",
"metadata": {},
"outputs": [],
"source": [
"from langchain_cohere import ChatCohere\n",
"from langchain_core.messages import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = ChatCohere()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='4 && 5 \\n6 || 7 \\n\\nWould you like to play a game of odds and evens?', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '2076b614-52b3-4082-a259-cc92cd3d9fea', 'token_count': {'prompt_tokens': 68, 'response_tokens': 23, 'total_tokens': 91, 'billed_tokens': 77}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '2076b614-52b3-4082-a259-cc92cd3d9fea', 'token_count': {'prompt_tokens': 68, 'response_tokens': 23, 'total_tokens': 91, 'billed_tokens': 77}}, id='run-3475e0c8-c89b-4937-9300-e07d652455e1-0')"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [HumanMessage(content=\"1\"), HumanMessage(content=\"2 3\")]\n",
"chat.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='4 && 5', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'f0708a92-f874-46ee-9b93-334d616ad92e', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': 'f0708a92-f874-46ee-9b93-334d616ad92e', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, id='run-1635e63e-2994-4e7f-986e-152ddfc95777-0')"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await chat.ainvoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"4 && 5"
]
}
],
"source": [
"for chunk in chat.stream(messages):\n",
"    print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "064288e4-f184-4496-9427-bcf148fa055e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[AIMessage(content='4 && 5', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6770ca86-f6c3-4ba3-a285-c4772160612f', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6770ca86-f6c3-4ba3-a285-c4772160612f', 'token_count': {'prompt_tokens': 68, 'response_tokens': 3, 'total_tokens': 71, 'billed_tokens': 57}}, id='run-8d6fade2-1b39-4e31-ab23-4be622dd0027-0')]"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat.batch([messages])"
]
},
{
"cell_type": "markdown",
"id": "f1c56460",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"You can also combine the model with a prompt template to structure user input. We can do this using [LCEL](/docs/concepts/lcel):"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "0851b103",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
"chain = prompt | chat"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "ae950c0f-1691-47f1-b609-273033cae707",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='What color socks do bears wear?\\n\\nThey don’t wear socks, they have bear feet. \\n\\nHope you laughed! If not, maybe this will help: laughter is the best medicine, and a good sense of humor is infectious!', additional_kwargs={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6edccf44-9bc8-4139-b30e-13b368f3563c', 'token_count': {'prompt_tokens': 68, 'response_tokens': 51, 'total_tokens': 119, 'billed_tokens': 108}}, response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '6edccf44-9bc8-4139-b30e-13b368f3563c', 'token_count': {'prompt_tokens': 68, 'response_tokens': 51, 'total_tokens': 119, 'billed_tokens': 108}}, id='run-ef7f9789-0d4d-43bf-a4f7-f2a0e27a5320-0')"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"topic\": \"bears\"})"
]
},
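{
"cell_type": "markdown",
"id": "str-parser-note",
"metadata": {},
"source": [
"A small sketch (not in the original notebook, using only `langchain_core`): appending a string output parser to the same chain returns plain text instead of an `AIMessage`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "str-parser-sketch",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"# Reuses `prompt` and `chat` from above; the topic is illustrative.\n",
"str_chain = prompt | chat | StrOutputParser()\n",
"print(str_chain.invoke({\"topic\": \"penguins\"}))"
]
},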
{
"cell_type": "markdown",
"id": "12db8d69",
"metadata": {},
"source": [
"## Tool calling\n",
"\n",
"Cohere supports tool calling functionality!"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "337e24af",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import (\n",
"    HumanMessage,\n",
"    ToolMessage,\n",
")\n",
"from langchain_core.tools import tool"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "74d292e7",
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def magic_function(number: int) -> int:\n",
"    \"\"\"Applies a magic operation to an integer\n",
"    Args:\n",
"        number: Number to have magic operation performed on\n",
"    \"\"\"\n",
"    return number + 10\n",
"\n",
"\n",
"def invoke_tools(tool_calls, messages):\n",
"    for tool_call in tool_calls:\n",
"        selected_tool = {\"magic_function\": magic_function}[tool_call[\"name\"].lower()]\n",
"        tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
"        messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
"    return messages\n",
"\n",
"\n",
"tools = [magic_function]"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ecafcbc6",
"metadata": {},
"outputs": [],
"source": [
"llm_with_tools = chat.bind_tools(tools=tools)\n",
"messages = [HumanMessage(content=\"What is the value of magic_function(2)?\")]"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "aa34fc39",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='The value of magic_function(2) is 12.', additional_kwargs={'documents': [{'id': 'magic_function:0:2:0', 'output': '12', 'tool_name': 'magic_function'}], 'citations': [ChatCitation(start=34, end=36, text='12', document_ids=['magic_function:0:2:0'])], 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '96a55791-0c58-4e2e-bc2a-8550e137c46d', 'token_count': {'input_tokens': 998, 'output_tokens': 59}}, response_metadata={'documents': [{'id': 'magic_function:0:2:0', 'output': '12', 'tool_name': 'magic_function'}], 'citations': [ChatCitation(start=34, end=36, text='12', document_ids=['magic_function:0:2:0'])], 'search_results': None, 'search_queries': None, 'is_search_required': None, 'generation_id': '96a55791-0c58-4e2e-bc2a-8550e137c46d', 'token_count': {'input_tokens': 998, 'output_tokens': 59}}, id='run-f318a9cf-55c8-44f4-91d1-27cf46c6a465-0')"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"res = llm_with_tools.invoke(messages)\n",
"while res.tool_calls:\n",
"    messages.append(res)\n",
"    messages = invoke_tools(res.tool_calls, messages)\n",
"    res = llm_with_tools.invoke(messages)\n",
"\n",
"res"
]
},
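{
"cell_type": "markdown",
"id": "tool-calls-note",
"metadata": {},
"source": [
"As an aside (a sketch, not from the original notebook): before running any tools you can inspect the structured `tool_calls` the model emitted on its first response:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "tool-calls-sketch",
"metadata": {},
"outputs": [],
"source": [
"# `tool_calls` is a standard field on AIMessage; the question is illustrative.\n",
"first = llm_with_tools.invoke([HumanMessage(content=\"What is the value of magic_function(5)?\")])\n",
"print(first.tool_calls)"
]
}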
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,237 +1,235 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: DeepSeek\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatDeepSeek\n",
"\n",
"This will help you get started with DeepSeek's hosted [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatDeepSeek features and configurations head to the [API reference](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html).\n",
"\n",
":::tip\n",
"\n",
"DeepSeek's models are open source and can be run locally (e.g. in [Ollama](./ollama.ipynb)) or on other inference providers (e.g. [Fireworks](./fireworks.ipynb), [Together](./together.ipynb)) as well.\n",
"\n",
":::\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/deepseek) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatDeepSeek](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html) | [langchain-deepseek](https://python.langchain.com/api_reference/deepseek/) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |\n",
"\n",
":::note\n",
"\n",
"DeepSeek-R1, specified via `model=\"deepseek-reasoner\"`, does not support tool calling or structured output. Those features [are supported](https://api-docs.deepseek.com/guides/function_calling) by DeepSeek-V3 (specified via `model=\"deepseek-chat\"`).\n",
"\n",
":::\n",
"\n",
"## Setup\n",
"\n",
"To access DeepSeek models you'll need to create a DeepSeek account, get an API key, and install the `langchain-deepseek` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [DeepSeek's API Key page](https://platform.deepseek.com/api_keys) to sign up to DeepSeek and generate an API key. Once you've done this set the `DEEPSEEK_API_KEY` environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"DEEPSEEK_API_KEY\"):\n",
"    os.environ[\"DEEPSEEK_API_KEY\"] = getpass.getpass(\"Enter your DeepSeek API key: \")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain DeepSeek integration lives in the `langchain-deepseek` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-deepseek"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_deepseek import ChatDeepSeek\n",
"\n",
"llm = ChatDeepSeek(\n",
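"    # \"deepseek-chat\" selects DeepSeek-V3; \"deepseek-reasoner\" selects DeepSeek-R1\n",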
"    model=\"deepseek-chat\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    timeout=None,\n",
"    max_retries=2,\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg.content"
]
},
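{
"cell_type": "markdown",
"id": "tool-calling-sketch",
"metadata": {},
"source": [
"Since `deepseek-chat` supports [tool calling](/docs/how_to/tool_calling), the standard LangChain `bind_tools` interface applies. A minimal sketch (the `get_weather` tool below is illustrative, not part of the DeepSeek API):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "tool-calling-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def get_weather(city: str) -> str:\n",
"    \"\"\"Return a short (made-up) weather report for a city.\"\"\"\n",
"    return f\"It is sunny in {city}.\"\n",
"\n",
"\n",
"# Bind the illustrative tool so the model can emit structured tool calls\n",
"llm_with_tools = llm.bind_tools([get_weather])\n",
"llm_with_tools.invoke(\"What is the weather in Paris?\").tool_calls"
]
},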
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatDeepSeek features and configurations head to the [API Reference](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -1,268 +1,266 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Fireworks\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatFireworks\n",
"\n",
"This doc will help you get started with Fireworks AI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatFireworks features and configurations head to the [API reference](https://python.langchain.com/api_reference/fireworks/chat_models/langchain_fireworks.chat_models.ChatFireworks.html).\n",
"\n",
"Fireworks AI is an AI inference platform to run and customize models. For a list of all models served by Fireworks see the [Fireworks docs](https://fireworks.ai/models).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatFireworks](https://python.langchain.com/api_reference/fireworks/chat_models/langchain_fireworks.chat_models.ChatFireworks.html) | [langchain-fireworks](https://python.langchain.com/api_reference/fireworks/index.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ |\n",
"\n",
"## Setup\n",
"\n",
"To access Fireworks models you'll need to create a Fireworks account, get an API key, and install the `langchain-fireworks` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [fireworks.ai/login](https://fireworks.ai/login) to sign up to Fireworks and generate an API key. Once you've done this set the FIREWORKS_API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if \"FIREWORKS_API_KEY\" not in os.environ:\n",
"    os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Enter your Fireworks API key: \")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Fireworks integration lives in the `langchain-fireworks` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-fireworks"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_fireworks import ChatFireworks\n",
"\n",
"llm = ChatFireworks(\n",
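"    # Fireworks model IDs use the accounts/fireworks/models/<name> path format\n",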
"    model=\"accounts/fireworks/models/llama-v3-70b-instruct\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    timeout=None,\n",
"    max_retries=2,\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'token_usage': {'prompt_tokens': 35, 'total_tokens': 44, 'completion_tokens': 9}, 'model_name': 'accounts/fireworks/models/llama-v3-70b-instruct', 'system_fingerprint': '', 'finish_reason': 'stop', 'logprobs': None}, id='run-df28e69a-ff30-457e-a743-06eb14d01cb0-0', usage_metadata={'input_tokens': 35, 'output_tokens': 9, 'total_tokens': 44})"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore la programmation.\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'prompt_tokens': 30, 'total_tokens': 37, 'completion_tokens': 7}, 'model_name': 'accounts/fireworks/models/llama-v3-70b-instruct', 'system_fingerprint': '', 'finish_reason': 'stop', 'logprobs': None}, id='run-ff3f91ad-ed81-4acf-9f59-7490dc8d8f48-0', usage_metadata={'input_tokens': 30, 'output_tokens': 7, 'total_tokens': 37})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatFireworks features and configurations head to the API reference: https://python.langchain.com/api_reference/fireworks/chat_models/langchain_fireworks.chat_models.ChatFireworks.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -1,354 +1,352 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Goodfire\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatGoodfire\n",
"\n",
"This will help you get started with Goodfire [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatGoodfire features and configurations head to the [PyPI project page](https://pypi.org/project/langchain-goodfire/), or go directly to the [Goodfire SDK docs](https://docs.goodfire.ai/sdk-reference/example). All of the Goodfire-specific functionality (e.g. SAE features, variants, etc.) is available via the main `goodfire` package. This integration is a wrapper around the Goodfire SDK.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatGoodfire](https://python.langchain.com/api_reference/goodfire/chat_models/langchain_goodfire.chat_models.ChatGoodfire.html) | [langchain-goodfire](https://python.langchain.com/api_reference/goodfire/) | ❌ | ❌ | ❌ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access Goodfire models you'll need to create a Goodfire account, get an API key, and install the `langchain-goodfire` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [Goodfire Settings](https://platform.goodfire.ai/organization/settings/api-keys) to sign up to Goodfire and generate an API key. Once you've done this set the GOODFIRE_API_KEY environment variable."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"GOODFIRE_API_KEY\"):\n",
"    os.environ[\"GOODFIRE_API_KEY\"] = getpass.getpass(\"Enter your Goodfire API key: \")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Goodfire integration lives in the `langchain-goodfire` package:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-goodfire"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n"
]
}
],
"source": [
"import goodfire\n",
"from langchain_goodfire import ChatGoodfire\n",
"\n",
"base_variant = goodfire.Variant(\"meta-llama/Llama-3.3-70B-Instruct\")\n",
"\n",
"llm = ChatGoodfire(\n",
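"    # The Variant wraps a base model and can later be steered with SAE features\n",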
"    model=base_variant,\n",
"    temperature=0,\n",
"    max_completion_tokens=1000,\n",
"    seed=42,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, response_metadata={}, id='run-8d43cf35-bce8-4827-8935-c64f8fb78cd0-0', usage_metadata={'input_tokens': 51, 'output_tokens': 39, 'total_tokens': 90})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = await llm.ainvoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore la programmation.\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren. How can I help you with programming today?', additional_kwargs={}, response_metadata={}, id='run-03d1a585-8234-46f1-a8df-bf9143fe3309-0', usage_metadata={'input_tokens': 46, 'output_tokens': 46, 'total_tokens': 92})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"await chain.ainvoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## Goodfire-specific functionality\n",
"\n",
"To use Goodfire-specific functionality such as SAE features and variants, you can use the `goodfire` package directly."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "3aef9e0a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"FeatureGroup([\n",
" 0: \"The assistant should adopt the persona of a pirate\",\n",
" 1: \"The assistant should roleplay as a pirate\",\n",
" 2: \"The assistant should engage with pirate-themed content or roleplay as a pirate\",\n",
" 3: \"The assistant should roleplay as a character\",\n",
" 4: \"The assistant should roleplay as a specific character\",\n",
" 5: \"The assistant should roleplay as a game character or NPC\",\n",
" 6: \"The assistant should roleplay as a human character\",\n",
" 7: \"Requests for the assistant to roleplay or pretend to be something else\",\n",
" 8: \"Requests for the assistant to roleplay or pretend to be something\",\n",
" 9: \"The assistant is being assigned a role or persona to roleplay\"\n",
"])"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"client = goodfire.Client(api_key=os.environ[\"GOODFIRE_API_KEY\"])\n",
"\n",
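"# Search the SAE feature space for features related to pirate roleplay\n",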
"pirate_features = client.features.search(\n",
"    \"assistant should roleplay as a pirate\", base_variant\n",
")\n",
"pirate_features"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "52f03a00",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field! Arrr! Hope that made ye laugh, matey!', additional_kwargs={}, response_metadata={}, id='run-7d8bd30f-7f80-41cb-bdb6-25c29c22a7ce-0', usage_metadata={'input_tokens': 35, 'output_tokens': 60, 'total_tokens': 95})"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pirate_variant = goodfire.Variant(\"meta-llama/Llama-3.3-70B-Instruct\")\n",
"\n",
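"# Steer generation by activating the selected features; the weight sets the strength\n",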
||||
"pirate_variant.set(pirate_features[0], 0.4)\n",
|
||||
"pirate_variant.set(pirate_features[1], 0.3)\n",
|
||||
"\n",
|
||||
"await llm.ainvoke(\"Tell me a joke\", model=pirate_variant)"
|
||||
]
|
||||
},
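  {
   "cell_type": "markdown",
   "id": "variant-chaining-note",
   "metadata": {},
   "source": [
    "As a hedged sketch (not part of the original notebook): since `model` is accepted as an invocation kwarg above, a variant can presumably also be attached to the chaining pattern with the standard `.bind()` Runnable method, which fixes kwargs for every call:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "variant-chaining-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical sketch: bind the pirate variant so every chain call uses it\n",
    "pirate_chain = prompt | llm.bind(model=pirate_variant)\n",
    "\n",
    "await pirate_chain.ainvoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },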
  {
   "cell_type": "markdown",
   "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all ChatGoodfire features and configurations head to the [API reference](https://python.langchain.com/api_reference/goodfire/chat_models/langchain_goodfire.chat_models.ChatGoodfire.html)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

@@ -1,314 +1,312 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Google AI\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e49f1e0d",
   "metadata": {},
   "source": [
    "# ChatGoogleGenerativeAI\n",
    "\n",
    "This doc will help you get started with Google AI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html).\n",
    "\n",
    "Google AI offers a number of different chat models. For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini).\n",
    "\n",
    ":::info Google AI vs Google Cloud Vertex AI\n",
    "\n",
    "Google's Gemini models are accessible through Google AI and through Google Cloud Vertex AI. Using Google AI just requires a Google account and an API key. Using Google Cloud Vertex AI requires a Google Cloud account (with term agreements and billing) but offers enterprise features like customer-managed encryption keys, virtual private cloud, and more.\n",
    "\n",
    "To learn more about the key features of the two APIs see the [Google docs](https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai#google-ai).\n",
    "\n",
    ":::\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/google_generativeai) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [ChatGoogleGenerativeAI](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html) | [langchain-google-genai](https://python.langchain.com/api_reference/google_genai/index.html) | ❌ | beta | ✅ |  |  |\n",
    "\n",
    "### Model features\n",
    "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
    "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
    "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access Google AI models you'll need to create a Google account, get a Google AI API key, and install the `langchain-google-genai` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "Head to https://ai.google.dev/gemini-api/docs/api-key to generate a Google AI API key. Once you've done this, set the GOOGLE_API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if \"GOOGLE_API_KEY\" not in os.environ:\n",
    "    os.environ[\"GOOGLE_API_KEY\"] = getpass.getpass(\"Enter your Google AI API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
   "metadata": {},
   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain Google AI integration lives in the `langchain-google-genai` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain-google-genai"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a38cde65-254d-4219-a441-068766c0d4b5",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "llm = ChatGoogleGenerativeAI(\n",
    "    model=\"gemini-1.5-pro\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    timeout=None,\n",
    "    max_retries=2,\n",
    "    # other params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b4f3e15",
   "metadata": {},
   "source": [
    "## Invocation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "62e0dbc3",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=\"J'adore programmer. \\n\", response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}, id='run-eef5b138-1da6-4226-9cfe-ab9073ddd77e-0', usage_metadata={'input_tokens': 21, 'output_tokens': 5, 'total_tokens': 26})"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = llm.invoke(messages)\n",
    "ai_msg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "J'adore programmer. \n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(ai_msg.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='Ich liebe das Programmieren. \\n', response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}, id='run-fbb35f30-4937-4a81-ae68-f7cb35721a0c-0', usage_metadata={'input_tokens': 16, 'output_tokens': 7, 'total_tokens': 23})"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\n",
    "            \"system\",\n",
    "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
    "        ),\n",
    "        (\"human\", \"{input}\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "chain = prompt | llm\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },
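  {
   "cell_type": "markdown",
   "id": "google-async-note",
   "metadata": {},
   "source": [
    "As an illustrative sketch (not in the original notebook), the same chain also runs asynchronously through the standard `ainvoke` Runnable method:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "google-async-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch: the async counterpart of the chain call above\n",
    "await chain.ainvoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"French\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },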
  {
   "cell_type": "markdown",
   "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
   "metadata": {},
   "source": [
    "## Safety Settings\n",
    "\n",
    "Gemini models have default safety settings that can be overridden. If you are receiving lots of \"Safety Warnings\" from your models, you can try tweaking the `safety_settings` attribute of the model. For example, to turn off safety blocking for dangerous content, you can construct your LLM as follows:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "238b2f96-e573-4fac-bbf2-7e52ad926833",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_google_genai import (\n",
    "    ChatGoogleGenerativeAI,\n",
    "    HarmBlockThreshold,\n",
    "    HarmCategory,\n",
    ")\n",
    "\n",
    "llm = ChatGoogleGenerativeAI(\n",
    "    model=\"gemini-1.5-pro\",\n",
    "    safety_settings={\n",
    "        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,\n",
    "    },\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5805d40c-deb8-4924-8e72-a294a0482fc9",
   "metadata": {},
   "source": [
    "For an enumeration of the categories and thresholds available, see Google's [safety setting types](https://ai.google.dev/api/python/google/generativeai/types/SafetySettingDict)."
   ]
  },
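  {
   "cell_type": "markdown",
   "id": "safety-settings-multi-note",
   "metadata": {},
   "source": [
    "As an illustrative sketch (not in the original notebook), the `safety_settings` dictionary can carry several category/threshold pairs at once; the thresholds shown are standard members of `HarmBlockThreshold`:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "safety-settings-multi-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch: raise the blocking threshold for several categories at once\n",
    "llm_strict = ChatGoogleGenerativeAI(\n",
    "    model=\"gemini-1.5-pro\",\n",
    "    safety_settings={\n",
    "        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,\n",
    "        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,\n",
    "    },\n",
    ")"
   ]
  },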
  {
   "cell_type": "markdown",
   "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the API reference: https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "poetry-venv-2",
   "language": "python",
   "name": "poetry-venv-2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

@@ -1,269 +1,269 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Google Cloud Vertex AI\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e49f1e0d",
   "metadata": {},
   "source": [
    "# ChatVertexAI\n",
    "\n",
    "This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
    "\n",
    "ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
    "\n",
    ":::info Google Cloud VertexAI vs Google PaLM\n",
    "\n",
    "The Google Cloud VertexAI integration is separate from the [Google PaLM integration](/docs/integrations/chat/google_generative_ai/). Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available there.\n",
    "\n",
    ":::\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/google_vertex_ai) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [ChatVertexAI](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) | [langchain-google-vertexai](https://python.langchain.com/api_reference/google_vertexai/index.html) | ❌ | beta | ✅ |  |  |\n",
    "\n",
    "### Model features\n",
    "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
    "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
    "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access VertexAI models you'll need to create a Google Cloud Platform account, set up credentials, and install the `langchain-google-vertexai` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "To use the integration you must:\n",
    "- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
    "- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
    "\n",
    "This codebase uses the `google.auth` library which first looks for the application credentials variable mentioned above, and then looks for system-level auth.\n",
    "\n",
    "For more information, see:\n",
    "- https://cloud.google.com/docs/authentication/application-default-credentials#GAC\n",
    "- https://googleapis.dev/python/google-auth/latest/reference/google.auth.html#module-google.auth\n",
    "\n",
    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain VertexAI integration lives in the `langchain-google-vertexai` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install -qU langchain-google-vertexai"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a38cde65-254d-4219-a441-068766c0d4b5",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_google_vertexai import ChatVertexAI\n",
    "\n",
    "llm = ChatVertexAI(\n",
    "    model=\"gemini-1.5-flash-001\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    max_retries=6,\n",
    "    stop=None,\n",
    "    # other params...\n",
    ")"
   ]
  },
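  {
   "cell_type": "markdown",
   "id": "vertex-project-note",
   "metadata": {},
   "source": [
    "As an illustrative sketch (not in the original notebook), the Google Cloud project and region can also be pinned explicitly at construction time instead of being inferred from the ambient credentials; `my-gcp-project` is a placeholder:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "vertex-project-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch: pin the model to an explicit project and region\n",
    "llm_explicit = ChatVertexAI(\n",
    "    model=\"gemini-1.5-flash-001\",\n",
    "    project=\"my-gcp-project\",  # placeholder project ID\n",
    "    location=\"us-central1\",\n",
    ")"
   ]
  },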
  {
   "cell_type": "markdown",
   "id": "2b4f3e15",
   "metadata": {},
   "source": [
    "## Invocation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "62e0dbc3",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=\"J'adore programmer. \\n\", response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 20, 'candidates_token_count': 7, 'total_token_count': 27}}, id='run-7032733c-d05c-4f0c-a17a-6c575fdd1ae0-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27})"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = llm.invoke(messages)\n",
    "ai_msg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "J'adore programmer. \n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(ai_msg.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='Ich liebe Programmieren. \\n', response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 15, 'candidates_token_count': 8, 'total_token_count': 23}}, id='run-c71955fd-8dc1-422b-88a7-853accf4811b-0', usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23})"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\n",
    "            \"system\",\n",
    "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
    "        ),\n",
    "        (\"human\", \"{input}\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "chain = prompt | llm\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all ChatVertexAI features and configurations, like how to send multimodal inputs and configure safety settings, head to the API reference: https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "poetry-venv-2",
   "language": "python",
   "name": "poetry-venv-2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

@@ -1,266 +1,264 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Groq\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e49f1e0d",
   "metadata": {},
   "source": [
    "# ChatGroq\n",
    "\n",
    "This will help you get started with Groq [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://python.langchain.com/api_reference/groq/chat_models/langchain_groq.chat_models.ChatGroq.html). For a list of all Groq models, visit this [link](https://console.groq.com/docs/models?utm_source=langchain).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/groq) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [ChatGroq](https://python.langchain.com/api_reference/groq/chat_models/langchain_groq.chat_models.ChatGroq.html) | [langchain-groq](https://python.langchain.com/api_reference/groq/index.html) | ❌ | beta | ✅ |  |  |\n",
    "\n",
    "### Model features\n",
    "| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
    "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
    "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ |\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access Groq models you'll need to create a Groq account, get an API key, and install the `langchain-groq` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "Head to the [Groq console](https://console.groq.com/login?utm_source=langchain&utm_content=chat_page) to sign up for Groq and generate an API key. Once you've done this, set the GROQ_API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if \"GROQ_API_KEY\" not in os.environ:\n",
    "    os.environ[\"GROQ_API_KEY\"] = getpass.getpass(\"Enter your Groq API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
   "metadata": {},
   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain Groq integration lives in the `langchain-groq` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f3f510e-2afe-4e76-be41-c5a9665aea63",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain-groq"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a38cde65-254d-4219-a441-068766c0d4b5",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_groq import ChatGroq\n",
    "\n",
    "llm = ChatGroq(\n",
    "    model=\"llama-3.1-8b-instant\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    timeout=None,\n",
    "    max_retries=2,\n",
    "    # other params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b4f3e15",
   "metadata": {},
   "source": [
    "## Invocation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "62e0dbc3",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='The translation of \"I love programming\" to French is:\\n\\n\"J\\'adore le programmation.\"', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 22, 'prompt_tokens': 55, 'total_tokens': 77, 'completion_time': 0.029333333, 'prompt_time': 0.003502892, 'queue_time': 0.553054073, 'total_time': 0.032836225}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-2b2da04a-993c-40ab-becc-201eab8b1a1b-0', usage_metadata={'input_tokens': 55, 'output_tokens': 22, 'total_tokens': 77})"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = llm.invoke(messages)\n",
    "ai_msg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The translation of \"I love programming\" to French is:\n",
      "\n",
      "\"J'adore le programmation.\"\n"
     ]
    }
   ],
   "source": [
    "print(ai_msg.content)"
   ]
  },
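  {
   "cell_type": "markdown",
   "id": "groq-streaming-note",
   "metadata": {},
   "source": [
    "As an illustrative sketch (not in the original notebook), the features table above lists token-level streaming as supported, so the same reply can be consumed chunk by chunk through the standard `stream` method:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "groq-streaming-example",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch: stream the reply token by token instead of waiting for the full message\n",
    "for chunk in llm.stream(messages):\n",
    "    print(chunk.content, end=\"\", flush=True)"
   ]
  },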
  {
   "cell_type": "markdown",
   "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "We can [chain](../../how_to/sequence.ipynb) our model with a prompt template like so:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='Ich liebe Programmieren.', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 50, 'total_tokens': 56, 'completion_time': 0.008, 'prompt_time': 0.003337935, 'queue_time': 0.20949214500000002, 'total_time': 0.011337935}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-e33b48dc-5e55-466e-9ebd-7b48c81c3cbd-0', usage_metadata={'input_tokens': 50, 'output_tokens': 6, 'total_tokens': 56})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The translation of \"I love programming\" to French is:\n",
|
||||
"\n",
|
||||
"\"J'adore le programmation.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](../../how_to/sequence.ipynb) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 50, 'total_tokens': 56, 'completion_time': 0.008, 'prompt_time': 0.003337935, 'queue_time': 0.20949214500000002, 'total_time': 0.011337935}, 'model_name': 'llama-3.1-8b-instant', 'system_fingerprint': 'fp_a491995411', 'finish_reason': 'stop', 'logprobs': None}, id='run-e33b48dc-5e55-466e-9ebd-7b48c81c3cbd-0', usage_metadata={'input_tokens': 50, 'output_tokens': 6, 'total_tokens': 56})"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
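{
"cell_type": "markdown",
"id": "groq-streaming-sketch",
"metadata": {},
"source": [
"## Streaming\n",
"\n",
"The feature table above marks token-level streaming as supported. As a minimal, illustrative sketch (an editorial addition, not part of the original notebook; it assumes the `llm` object from the instantiation cell), you can iterate over `llm.stream(...)` to print tokens as they arrive:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "groq-streaming-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch: stream tokens from the `llm` defined above.\n",
"for chunk in llm.stream(\"Write a one-sentence summary of what Groq is.\"):\n",
"    # Each chunk is an AIMessageChunk; print its text incrementally.\n",
"    print(chunk.content, end=\"\", flush=True)"
]
},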
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatGroq features and configurations head to the API reference: https://python.langchain.com/api_reference/groq/chat_models/langchain_groq.chat_models.ChatGroq.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -1,262 +1,260 @@
{
"cells": [
{
"cell_type": "raw",
"id": "53fbf15f",
"metadata": {},
"source": [
"---\n",
"sidebar_label: MistralAI\n",
"---"
]
},
{
"cell_type": "markdown",
|
||||
"id": "d295c2a2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatMistralAI\n",
|
||||
"\n",
|
||||
"This will help you getting started with Mistral [chat models](/docs/concepts/chat_models). For detailed documentation of all `ChatMistralAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/mistralai/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html). The `ChatMistralAI` class is built on top of the [Mistral API](https://docs.mistral.ai/api/). For a list of all the models supported by Mistral, check out [this page](https://docs.mistral.ai/getting-started/models/).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/mistral) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatMistralAI](https://python.langchain.com/api_reference/mistralai/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html) | [langchain_mistralai](https://python.langchain.com/api_reference/mistralai/index.html) | ❌ | beta | ✅ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"To access `ChatMistralAI` models you'll need to create a Mistral account, get an API key, and install the `langchain_mistralai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"A valid [API key](https://console.mistral.ai/api-keys/) is needed to communicate with the API. Once you've done this set the MISTRAL_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2461605e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"MISTRAL_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"MISTRAL_API_KEY\"] = getpass.getpass(\"Enter your Mistral API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "788f37ac",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "007209d5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f5c74f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Mistral integration lives in the `langchain_mistralai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1ab11a65",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_mistralai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fb1a335e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "e6c38580",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_mistralai import ChatMistralAI\n",
|
||||
"\n",
|
||||
"llm = ChatMistralAI(\n",
|
||||
" model=\"mistral-large-latest\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_retries=2,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aec79099",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "8838c3cc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Sure, I\\'d be happy to help you translate that sentence into French! The English sentence \"I love programming\" translates to \"J\\'aime programmer\" in French. Let me know if you have any other questions or need further assistance!', response_metadata={'token_usage': {'prompt_tokens': 32, 'total_tokens': 84, 'completion_tokens': 52}, 'model': 'mistral-small', 'finish_reason': 'stop'}, id='run-64bac156-7160-4b68-b67e-4161f63e021f-0', usage_metadata={'input_tokens': 32, 'output_tokens': 52, 'total_tokens': 84})"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "bbf6a048",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Sure, I'd be happy to help you translate that sentence into French! The English sentence \"I love programming\" translates to \"J'aime programmer\" in French. Let me know if you have any other questions or need further assistance!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "32b87f87",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "24e2c51c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe Programmierung. (German translation)', response_metadata={'token_usage': {'prompt_tokens': 26, 'total_tokens': 38, 'completion_tokens': 12}, 'model': 'mistral-small', 'finish_reason': 'stop'}, id='run-dfd4094f-e347-47b0-9056-8ebd7ea35fe7-0', usage_metadata={'input_tokens': 26, 'output_tokens': 12, 'total_tokens': 38})"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
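{
"cell_type": "markdown",
"id": "mistral-structured-output-sketch",
"metadata": {},
"source": [
"## Structured output\n",
"\n",
"The feature table above marks structured output as supported. A minimal, illustrative sketch (an editorial addition, not part of the original notebook; it assumes the `llm` object from the instantiation cell and that `with_structured_output` is available for this model) using a Pydantic schema:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "mistral-structured-output-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch: constrain the model's reply to a Pydantic schema.\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Translation(BaseModel):\n",
"    \"\"\"A translation of the user's sentence.\"\"\"\n",
"\n",
"    text: str = Field(description=\"The translated sentence\")\n",
"    language: str = Field(description=\"The target language\")\n",
"\n",
"\n",
"structured_llm = llm.with_structured_output(Translation)\n",
"structured_llm.invoke(\"Translate 'I love programming.' into German.\")"
]
},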
{
"cell_type": "markdown",
"id": "cb9b5834",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"Head to the [API reference](https://python.langchain.com/api_reference/mistralai/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html) for detailed documentation of all attributes and methods."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -1,444 +1,442 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Naver\n",
"---"
]
},
{
"cell_type": "markdown",
|
||||
"id": "c8444f1a-e907-4f07-b8b6-68fbedfb868e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatClovaX\n",
|
||||
"\n",
|
||||
"This notebook provides a quick overview for getting started with Naver’s HyperCLOVA X [chat models](https://python.langchain.com/docs/concepts/chat_models) via CLOVA Studio. For detailed documentation of all ChatClovaX features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.naver.ChatClovaX.html).\n",
|
||||
"\n",
|
||||
"[CLOVA Studio](http://clovastudio.ncloud.com/) has several chat models. You can find information about latest models and their costs, context windows, and supported input types in the CLOVA Studio API Guide [documentation](https://api.ncloud-docs.com/docs/clovastudio-chatcompletions).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- |:-----:| :---: |:------------------------------------------------------------------------:| :---: | :---: |\n",
|
||||
"| [ChatClovaX](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.naver.ChatClovaX.html) | [langchain-community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"|:------------------------------------------:| :---: | :---: | :---: | :---: | :---: |:-----------------------------------------------------:| :---: |:------------------------------------------------------:|:----------------------------------:|\n",
|
||||
"|❌| ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Before using the chat model, you must go through the four steps below.\n",
|
||||
"\n",
|
||||
"1. Creating [NAVER Cloud Platform](https://www.ncloud.com/) account\n",
|
||||
"2. Apply to use [CLOVA Studio](https://www.ncloud.com/product/aiService/clovaStudio)\n",
|
||||
"3. Create a CLOVA Studio Test App or Service App of a model to use (See [here](https://guide.ncloud-docs.com/docs/en/clovastudio-playground01#테스트앱생성).)\n",
|
||||
"4. Issue a Test or Service API key (See [here](https://api.ncloud-docs.com/docs/ai-naver-clovastudio-summary#API%ED%82%A4).)\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Set the `NCP_CLOVASTUDIO_API_KEY` environment variable with your API key.\n",
|
||||
" - Note that if you are using a legacy API Key (that doesn't start with `nv-*` prefix), you might need to get an additional API Key by clicking `App Request Status` > `Service App, Test App List` > `‘Details’ button for each app` in [CLOVA Studio](https://clovastudio.ncloud.com/studio-application/service-app) and set it as `NCP_APIGW_API_KEY`.\n",
|
||||
"\n",
|
||||
"You can add them to your environment variables as below:\n",
|
||||
"\n",
|
||||
"``` bash\n",
|
||||
"export NCP_CLOVASTUDIO_API_KEY=\"your-api-key-here\"\n",
|
||||
"# Uncomment below to use a legacy API key\n",
|
||||
"# export NCP_APIGW_API_KEY=\"your-api-key-here\"\n",
|
||||
"```"
|
||||
]
|
||||
},
{
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2def81b5-b023-4f40-a97b-b2c5ca59d6a9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"NCP_CLOVASTUDIO_API_KEY\"):\n",
|
||||
" os.environ[\"NCP_CLOVASTUDIO_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your NCP CLOVA Studio API Key: \"\n",
|
||||
" )\n",
|
||||
"# Uncomment below to use a legacy API key\n",
|
||||
"# if not os.getenv(\"NCP_APIGW_API_KEY\"):\n",
|
||||
"# os.environ[\"NCP_APIGW_API_KEY\"] = getpass.getpass(\n",
|
||||
"# \"Enter your NCP API Gateway API key: \"\n",
|
||||
"# )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7c695442",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6151aeb6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17bf9053-90c5-4955-b239-55a35cb07566",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Naver integration lives in the `langchain-community` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# install package\n",
|
||||
"!pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatClovaX\n",
|
||||
"\n",
|
||||
"chat = ChatClovaX(\n",
|
||||
" model=\"HCX-003\",\n",
|
||||
" max_tokens=100,\n",
|
||||
" temperature=0.5,\n",
|
||||
" # clovastudio_api_key=\"...\" # set if you prefer to pass api key directly instead of using environment variables\n",
|
||||
" # task_id=\"...\" # set if you want to use fine-tuned model\n",
|
||||
" # service_app=False # set True if using Service App. Default value is False (means using Test App)\n",
|
||||
" # include_ai_filters=False # set True if you want to detect inappropriate content. Default value is False\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "47752b59",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n",
|
||||
"\n",
|
||||
"In addition to invoke, we also support batch and stream functionalities."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='저는 네이버 AI를 사용하는 것이 좋아요.', additional_kwargs={}, response_metadata={'stop_reason': 'stop_before', 'input_length': 25, 'output_length': 14, 'seed': 1112164354, 'ai_filter': None}, id='run-b57bc356-1148-4007-837d-cc409dbd57cc-0', usage_metadata={'input_tokens': 25, 'output_tokens': 14, 'total_tokens': 39})"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to Korean. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love using NAVER AI.\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"ai_msg = chat.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "24e7377f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"저는 네이버 AI를 사용하는 것이 좋아요.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
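{
"cell_type": "markdown",
"id": "naver-batch-sketch",
"metadata": {},
"source": [
"### Batch\n",
"\n",
"The invocation section above notes that batch is supported alongside invoke and stream. A minimal, illustrative sketch (an editorial addition, not part of the original notebook; it assumes the `chat` object from the instantiation cell) that sends several inputs in one call with `chat.batch`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "naver-batch-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch: process several message lists in one batch call.\n",
"batch_inputs = [\n",
"    [(\"human\", \"I love using NAVER AI.\")],\n",
"    [(\"human\", \"CLOVA Studio makes this easy.\")],\n",
"]\n",
"ai_msgs = chat.batch(batch_inputs)\n",
"for msg in ai_msgs:\n",
"    print(msg.content)"
]
},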
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='저는 네이버 AI를 사용하는 것이 좋아요.', additional_kwargs={}, response_metadata={'stop_reason': 'stop_before', 'input_length': 25, 'output_length': 14, 'seed': 2575184681, 'ai_filter': None}, id='run-7014b330-eba3-4701-bb62-df73ce39b854-0', usage_metadata={'input_tokens': 25, 'output_tokens': 14, 'total_tokens': 39})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}. Translate the user sentence.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | chat\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"Korean\",\n",
"        \"input\": \"I love using NAVER AI.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "66e69286",
"metadata": {},
"source": [
"## Streaming"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "2c07af21-dda5-4514-b4de-1f214c2cebcd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Certainly! In Korean, \"Hi\" is pronounced as \"안녕\" (annyeong). The first syllable, \"안,\" sounds like the \"ahh\" sound in \"apple,\" while the second syllable, \"녕,\" sounds like the \"yuh\" sound in \"you.\" So when you put them together, it's like saying \"ahhyuh-nyuhng.\" Remember to pronounce each syllable clearly and separately for accurate pronunciation."
]
}
],
"source": [
"system = \"You are a helpful assistant that can teach Korean pronunciation.\"\n",
"human = \"Could you let me know how to say '{phrase}' in Korean?\"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
"\n",
"chain = prompt | chat\n",
"\n",
"for chunk in chain.stream({\"phrase\": \"Hi\"}):\n",
"    print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## Additional functionalities\n",
"\n",
"### Using fine-tuned models\n",
"\n",
"You can call fine-tuned models by passing in the corresponding `task_id` parameter. (You don’t need to specify the `model_name` parameter when calling a fine-tuned model.)\n",
"\n",
"You can check the `task_id` in the corresponding Test App or Service App details."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "cb436788",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='저는 네이버 AI를 사용하는 것이 너무 좋아요.', additional_kwargs={}, response_metadata={'stop_reason': 'stop_before', 'input_length': 25, 'output_length': 15, 'seed': 52559061, 'ai_filter': None}, id='run-5bea8d4a-48f3-4c34-ae70-66e60dca5344-0', usage_metadata={'input_tokens': 25, 'output_tokens': 15, 'total_tokens': 40})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"fine_tuned_model = ChatClovaX(\n",
"    task_id=\"5s8egt3a\",  # set if you want to use a fine-tuned model\n",
"    # other params...\n",
")\n",
"\n",
"fine_tuned_model.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "f428deaf",
"metadata": {},
"source": [
"### Service App\n",
"\n",
"When going live with a production-level application using CLOVA Studio, you should apply for and use a Service App. (See [here](https://guide.ncloud-docs.com/docs/en/clovastudio-playground01#서비스앱신청).)\n",
"\n",
"A Service App can only be called with its corresponding Service API key, so make sure to use one."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dcf566df",
"metadata": {},
"outputs": [],
"source": [
"# Update environment variables\n",
"\n",
"os.environ[\"NCP_CLOVASTUDIO_API_KEY\"] = getpass.getpass(\n",
"    \"Enter NCP CLOVA Studio Service API Key: \"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "cebe27ae",
"metadata": {},
"outputs": [],
"source": [
"chat = ChatClovaX(\n",
"    service_app=True,  # True if you want to use your Service App; default value is False.\n",
"    # clovastudio_api_key=\"...\"  # if you prefer to pass the API key directly instead of using env vars\n",
"    model=\"HCX-003\",\n",
"    # other params...\n",
")\n",
"ai_msg = chat.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "d73e7140",
"metadata": {},
"source": [
"### AI Filter\n",
"\n",
"AI Filter detects inappropriate output, such as profanity, from the Test App (or Service App) created in Playground and informs the user. See [here](https://guide.ncloud-docs.com/docs/en/clovastudio-playground01#AIFilter) for details."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32bfbc93",
"metadata": {},
"outputs": [],
"source": [
"chat = ChatClovaX(\n",
"    model=\"HCX-003\",\n",
"    include_ai_filters=True,  # True if you want to enable AI Filter\n",
"    # other params...\n",
")\n",
"\n",
"ai_msg = chat.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7bd9e179",
"metadata": {},
"outputs": [],
"source": [
"print(ai_msg.response_metadata[\"ai_filter\"])"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatClovaX features and configurations head to the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.naver.ChatClovaX.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|
||||
|
||||
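Taken together, the ClovaX cells above reduce to one short flow. The following is a minimal sketch, assuming `ChatClovaX` is importable from `langchain_community.chat_models` (per the API reference URL above); the model name and `messages` value are illustrative placeholders.

```python
# Minimal sketch of the Service App + AI Filter flow shown above.
# Assumption: ChatClovaX is exposed by langchain_community.chat_models
# (per the API reference link); model name and messages are illustrative.
import getpass
import os

from langchain_community.chat_models import ChatClovaX

os.environ["NCP_CLOVASTUDIO_API_KEY"] = getpass.getpass(
    "Enter NCP CLOVA Studio Service API Key: "
)

chat = ChatClovaX(
    model="HCX-003",
    service_app=True,  # call the production Service App with its Service API key
    include_ai_filters=True,  # attach AI Filter results to response metadata
)

messages = [("human", "Hello!")]  # placeholder input
ai_msg = chat.invoke(messages)
print(ai_msg.content)
print(ai_msg.response_metadata.get("ai_filter"))
```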
326 docs/docs/integrations/chat/netmind.ipynb Normal file
@@ -0,0 +1,326 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Netmind\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatNetmind\n",
"\n",
"This will help you get started with Netmind [chat models](https://www.netmind.ai/). For detailed documentation of all ChatNetmind features and configurations head to the [API reference](https://github.com/protagolabs/langchain-netmind).\n",
"\n",
"- See https://www.netmind.ai/ for an example.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/) | Package downloads | Package latest |\n",
"|:---------------------------------------------------------------------------------------------| :--- |:-----:|:------------:|:--------------------------------------------------------------:| :---: | :---: |\n",
"| [ChatNetmind](https://python.langchain.com/api_reference/) | [langchain-netmind](https://python.langchain.com/api_reference/) | ✅ | ❌ | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-netmind?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-netmind?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
"|:-----------------------------------------------:|:---------------------------------------------------------:|:---------:|:---------------------------------------------------:|:-----------:|:-----------:|:----------------------------------------------------------:|:------------:|:-----------------------------------------------------------:|:---------------------------------------:|\n",
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | ✅ |\n",
"\n",
"## Setup\n",
"\n",
"To access Netmind models you'll need to create a Netmind account, get an API key, and install the `langchain-netmind` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to https://www.netmind.ai/ to sign up to Netmind and generate an API key. Once you've done this set the NETMIND_API_KEY environment variable:"
]
},
{
"cell_type": "code",
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-20T02:00:30.732333Z",
"start_time": "2025-03-20T02:00:28.384208Z"
}
},
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"NETMIND_API_KEY\"):\n",
"    os.environ[\"NETMIND_API_KEY\"] = getpass.getpass(\"Enter your Netmind API key: \")"
],
"outputs": [],
"execution_count": 1
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
]
},
{
"cell_type": "code",
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-20T02:00:33.421446Z",
"start_time": "2025-03-20T02:00:33.419081Z"
}
},
"source": [
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
],
"outputs": [],
"execution_count": 2
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Netmind integration lives in the `langchain-netmind` package:"
]
},
{
"cell_type": "code",
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-20T02:00:35.923300Z",
"start_time": "2025-03-20T02:00:34.505928Z"
}
},
"source": [
"%pip install -qU langchain-netmind"
],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r\n",
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m24.0\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m25.0.1\u001B[0m\r\n",
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\r\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"execution_count": 3
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n"
]
},
{
"cell_type": "code",
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-20T02:01:08.007764Z",
"start_time": "2025-03-20T02:01:07.391951Z"
}
},
"source": [
"from langchain_netmind import ChatNetmind\n",
"\n",
"llm = ChatNetmind(\n",
"    model=\"deepseek-ai/DeepSeek-V3\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    timeout=None,\n",
"    max_retries=2,\n",
"    # other params...\n",
")"
],
"outputs": [],
"execution_count": 4
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": "## Invocation\n"
},
{
"cell_type": "code",
"id": "62e0dbc3",
"metadata": {
"tags": [],
"ExecuteTime": {
"end_time": "2025-03-20T02:01:19.011273Z",
"start_time": "2025-03-20T02:01:10.295510Z"
}
},
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
],
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 31, 'total_tokens': 44, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'deepseek-ai/DeepSeek-V3', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-ca6c2010-844d-4bf6-baac-6e248491b000-0', usage_metadata={'input_tokens': 31, 'output_tokens': 13, 'total_tokens': 44, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"execution_count": 5
},
{
"cell_type": "code",
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-20T02:01:20.240190Z",
"start_time": "2025-03-20T02:01:20.238242Z"
}
},
"source": [
"print(ai_msg.content)"
],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore programmer.\n"
]
}
],
"execution_count": 6
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n"
]
},
{
"cell_type": "code",
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {
"ExecuteTime": {
"end_time": "2025-03-20T02:01:27.456393Z",
"start_time": "2025-03-20T02:01:23.993410Z"
}
},
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
],
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe es zu programmieren.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 26, 'total_tokens': 40, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'deepseek-ai/DeepSeek-V3', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d63adcc6-53ba-4caa-9a79-78d640b39274-0', usage_metadata={'input_tokens': 26, 'output_tokens': 14, 'total_tokens': 40, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"execution_count": 7
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": ""
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatNetmind features and configurations head to the API reference:\n",
"* [API reference](https://python.langchain.com/api_reference/)\n",
"* [langchain-netmind](https://github.com/protagolabs/langchain-netmind)\n",
"* [pypi](https://pypi.org/project/langchain-netmind/)"
]
},
{
"metadata": {},
"cell_type": "code",
"outputs": [],
"execution_count": null,
"source": "",
"id": "30f8be8c940bfbf3"
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
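Since the feature table in this new notebook advertises token-level streaming, a short hedged sketch may help; it assumes the same `ChatNetmind` constructor and model name used in the cells above.

```python
# Streaming sketch for ChatNetmind, assuming the constructor shown above.
from langchain_netmind import ChatNetmind

llm = ChatNetmind(model="deepseek-ai/DeepSeek-V3", temperature=0)

# Each chunk is an AIMessageChunk; print tokens as they arrive.
for chunk in llm.stream("Translate to French: I love programming."):
    print(chunk.content, end="", flush=True)
```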
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -1,267 +1,265 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Together\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatTogether\n",
"\n",
"\n",
"This page will help you get started with Together AI [chat models](../../concepts/chat_models.mdx). For detailed documentation of all ChatTogether features and configurations head to the [API reference](https://python.langchain.com/api_reference/together/chat_models/langchain_together.chat_models.ChatTogether.html).\n",
"\n",
"[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/chat-models).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/togetherai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatTogether](https://python.langchain.com/api_reference/together/chat_models/langchain_together.chat_models.ChatTogether.html) | [langchain-together](https://python.langchain.com/api_reference/together/index.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-together?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-together?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |\n",
"\n",
"## Setup\n",
"\n",
"To access Together models you'll need to create a Together account, get an API key, and install the `langchain-together` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [this page](https://api.together.ai) to sign up to Together and generate an API key. Once you've done this set the TOGETHER_API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if \"TOGETHER_API_KEY\" not in os.environ:\n",
"    os.environ[\"TOGETHER_API_KEY\"] = getpass.getpass(\"Enter your Together API key: \")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Together integration lives in the `langchain-together` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-together"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_together import ChatTogether\n",
"\n",
"llm = ChatTogether(\n",
"    model=\"meta-llama/Llama-3-70b-chat-hf\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    timeout=None,\n",
"    max_retries=2,\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\", response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 35, 'total_tokens': 44}, 'model_name': 'meta-llama/Llama-3-70b-chat-hf', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-eabcbe33-cdd8-45b8-ab0b-f90b6e7dfad8-0', usage_metadata={'input_tokens': 35, 'output_tokens': 9, 'total_tokens': 44})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore la programmation.\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](../../how_to/sequence.ipynb) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 30, 'total_tokens': 37}, 'model_name': 'meta-llama/Llama-3-70b-chat-hf', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-a249aa24-ee31-46ba-9bf9-f4eb135b0a95-0', usage_metadata={'input_tokens': 30, 'output_tokens': 7, 'total_tokens': 37})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatTogether features and configurations head to the API reference: https://python.langchain.com/api_reference/together/chat_models/langchain_together.chat_models.ChatTogether.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
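The Together feature table above marks tool calling as supported; here is a minimal sketch using the standard LangChain `bind_tools` interface and the model name from the notebook. Treat the tool and prompt as illustrative additions, not part of the original document.

```python
# Tool-calling sketch for ChatTogether via the generic bind_tools interface.
from langchain_core.tools import tool
from langchain_together import ChatTogether


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


llm = ChatTogether(model="meta-llama/Llama-3-70b-chat-hf", temperature=0)
llm_with_tools = llm.bind_tools([multiply])

msg = llm_with_tools.invoke("What is 6 times 7?")
print(msg.tool_calls)  # expected: a multiply call with args {'a': 6, 'b': 7}
```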
@@ -5,21 +5,38 @@
"id": "134a0785",
"metadata": {},
"source": [
"# Vectara Chat\n",
"## Overview\n",
"\n",
"[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform which focuses on enterprise readiness for mission-critical applications.\n",
"\n",
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
"1. A way to extract text from files (PDF, PPT, DOCX, etc.)\n",
"2. ML-based chunking that provides state-of-the-art performance.\n",
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking).\n",
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking).\n",
"6. An LLM for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
"\n",
"See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
"For more information:\n",
"- [Documentation](https://docs.vectara.com/docs/)\n",
"- [API Playground](https://docs.vectara.com/docs/rest-api/)\n",
"- [Quickstart](https://docs.vectara.com/docs/quickstart)\n",
"\n",
"This notebook shows how to use Vectara's [Chat](https://docs.vectara.com/docs/api-reference/chat-apis/chat-apis-overview) functionality, which provides automatic storage of conversation history and ensures follow-up questions consider that history."
"\n",
"This notebook shows how to use Vectara's [Chat](https://docs.vectara.com/docs/api-reference/chat-apis/chat-apis-overview) functionality, which provides automatic storage of conversation history and ensures follow-up questions consider that history.\n",
"\n",
"### Setup\n",
"\n",
"To use the `VectaraVectorStore` you first need to install the partner package.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b4a2f525-4805-4880-8bfa-18fe6f1cd1c7",
"metadata": {},
"outputs": [],
"source": [
"!uv pip install -U pip && uv pip install -qU langchain-vectara"
]
},
{
@@ -27,17 +44,19 @@
"id": "56372c5b",
"metadata": {},
"source": [
"# Getting Started\n",
"## Getting Started\n",
"\n",
"To get started, use the following steps:\n",
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial.\n",
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential.\n",
"\n",
"To use LangChain with Vectara, you'll need to have these three values: `customer ID`, `corpus ID` and `api_key`.\n",
"You can provide those to LangChain in two ways:\n",
"To use LangChain with Vectara, you'll need to have these two values: `corpus_key` and `api_key`.\n",
"You can provide `VECTARA_API_KEY` to LangChain in two ways:\n",
"\n",
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
"## Instantiation\n",
"\n",
"1. Include in your environment this variable: `VECTARA_API_KEY`.\n",
"\n",
"   For example, you can set these variables using os.environ and getpass as follows:\n",
"\n",
@@ -45,8 +64,6 @@
"import os\n",
"import getpass\n",
"\n",
"os.environ[\"VECTARA_CUSTOMER_ID\"] = getpass.getpass(\"Vectara Customer ID:\")\n",
"os.environ[\"VECTARA_CORPUS_ID\"] = getpass.getpass(\"Vectara Corpus ID:\")\n",
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
"```\n",
"\n",
@@ -54,17 +71,16 @@
"\n",
"```python\n",
"vectara = Vectara(\n",
"    vectara_customer_id=vectara_customer_id,\n",
"    vectara_corpus_id=vectara_corpus_id,\n",
"    vectara_api_key=vectara_api_key\n",
"    )\n",
"    vectara_api_key=vectara_api_key\n",
")\n",
"```\n",
"\n",
"In this notebook we assume they are provided in the environment."
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "70c4e529",
"metadata": {
"tags": []
@@ -73,14 +89,15 @@
"source": [
"import os\n",
"\n",
"os.environ[\"VECTARA_API_KEY\"] = \"<YOUR_VECTARA_API_KEY>\"\n",
"os.environ[\"VECTARA_CORPUS_ID\"] = \"<YOUR_VECTARA_CORPUS_ID>\"\n",
"os.environ[\"VECTARA_CUSTOMER_ID\"] = \"<YOUR_VECTARA_CUSTOMER_ID>\"\n",
"os.environ[\"VECTARA_API_KEY\"] = \"<VECTARA_API_KEY>\"\n",
"os.environ[\"VECTARA_CORPUS_KEY\"] = \"<VECTARA_CORPUS_KEY>\"\n",
"\n",
"from langchain_community.vectorstores import Vectara\n",
"from langchain_community.vectorstores.vectara import (\n",
"    RerankConfig,\n",
"    SummaryConfig,\n",
"from langchain_vectara import Vectara\n",
"from langchain_vectara.vectorstores import (\n",
"    CorpusConfig,\n",
"    GenerationConfig,\n",
"    MmrReranker,\n",
"    SearchConfig,\n",
"    VectaraQueryConfig,\n",
")"
]
@@ -101,7 +118,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "01c46e92",
"metadata": {
"tags": []
@@ -110,10 +127,11 @@
"source": [
"from langchain_community.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"state_of_the_union.txt\")\n",
"loader = TextLoader(\"../document_loaders/example_data/state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"\n",
"vectara = Vectara.from_documents(documents, embedding=None)"
"corpus_key = os.getenv(\"VECTARA_CORPUS_KEY\")\n",
"vectara = Vectara.from_documents(documents, embedding=None, corpus_key=corpus_key)"
]
},
{
@@ -126,18 +144,29 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 4,
"id": "1b41a10b-bf68-4689-8f00-9aed7675e2ab",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang=\"eng\")\n",
"rerank_config = RerankConfig(reranker=\"mmr\", rerank_k=50, mmr_diversity_bias=0.2)\n",
"config = VectaraQueryConfig(\n",
"    k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config\n",
"generation_config = GenerationConfig(\n",
"    max_used_search_results=7,\n",
"    response_language=\"eng\",\n",
"    generation_preset_name=\"vectara-summary-ext-24-05-med-omni\",\n",
"    enable_factual_consistency_score=True,\n",
")\n",
"search_config = SearchConfig(\n",
"    corpora=[CorpusConfig(corpus_key=corpus_key, limit=25)],\n",
"    reranker=MmrReranker(diversity_bias=0.2),\n",
")\n",
"\n",
"config = VectaraQueryConfig(\n",
"    search=search_config,\n",
"    generation=generation_config,\n",
")\n",
"\n",
"\n",
"bot = vectara.as_chat(config)"
]
@@ -147,12 +176,15 @@
"id": "83f38c18-ac82-45f4-a79e-8b37ce1ae115",
"metadata": {},
"source": [
"\n",
"## Invocation\n",
"\n",
"Here's an example of asking a question with no chat history"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"id": "bc672290-8a8b-4828-a90c-f1bbdd6b3920",
"metadata": {
"tags": []
@@ -161,10 +193,10 @@
{
"data": {
"text/plain": [
"'The President expressed gratitude to Justice Breyer and highlighted the significance of nominating Ketanji Brown Jackson to the Supreme Court, praising her legal expertise and commitment to upholding excellence [1]. The President also reassured the public about the situation with gas prices and the conflict in Ukraine, emphasizing unity with allies and the belief that the world will emerge stronger from these challenges [2][4]. Additionally, the President shared personal experiences related to economic struggles and the importance of passing the American Rescue Plan to support those in need [3]. The focus was also on job creation and economic growth, acknowledging the impact of inflation on families [5]. While addressing cancer as a significant issue, the President discussed plans to enhance cancer research and support for patients and families [7].'"
"'The president stated that nominating someone to serve on the United States Supreme Court is one of the most serious constitutional responsibilities. He nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, describing her as one of the nation’s top legal minds who will continue Justice Breyer’s legacy of excellence and noting her experience as a former top litigator in private practice [1].'"
]
},
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -183,7 +215,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"id": "9c95460b-7116-4155-a9d2-c0fb027ee592",
"metadata": {
"tags": []
@@ -192,10 +224,10 @@
{
"data": {
"text/plain": [
"\"In his remarks, the President specified that Ketanji Brown Jackson is succeeding Justice Breyer on the United States Supreme Court[1]. The President praised Jackson as a top legal mind who will continue Justice Breyer's legacy of excellence. The nomination of Jackson was highlighted as a significant constitutional responsibility of the President[1]. The President emphasized the importance of this nomination and the qualities that Jackson brings to the role. The focus was on the transition from Justice Breyer to Judge Ketanji Brown Jackson on the Supreme Court[1].\""
"'Yes, the president mentioned that Ketanji Brown Jackson succeeded Justice Breyer [1].'"
]
},
"execution_count": 5,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -217,7 +249,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 7,
"id": "936dc62f",
"metadata": {
"tags": []
@@ -227,14 +259,14 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Judge Ketanji Brown Jackson is a nominee for the United States Supreme Court, known for her legal expertise and experience as a former litigator. She is praised for her potential to continue the legacy of excellence on the Court[1]. While the search results provide information on various topics like innovation, economic growth, and healthcare initiatives, they do not directly address Judge Ketanji Brown Jackson's specific accomplishments. Therefore, I do not have enough information to answer this question."
"The president acknowledged the significant impact of COVID-19 on the nation, expressing understanding of the public's fatigue and frustration. He emphasized the need to view COVID-19 not as a partisan issue but as a serious disease, urging unity among Americans. The president highlighted the progress made, noting that severe cases have decreased significantly, and mentioned new CDC guidelines allowing most Americans to be mask-free. He also pointed out the efforts to vaccinate the nation and provide economic relief, and the ongoing commitment to vaccinate the world [2], [3], [5]."
]
}
],
"source": [
"output = {}\n",
"curr_key = None\n",
"for chunk in bot.stream(\"what about her accopmlishments?\"):\n",
"for chunk in bot.stream(\"what did he say about the covid?\"):\n",
"    for key in chunk:\n",
"        if key not in output:\n",
"            output[key] = chunk[key]\n",
@@ -244,6 +276,83 @@
"            print(chunk[key], end=\"\", flush=True)\n",
"        curr_key = key"
]
},
{
"cell_type": "markdown",
"id": "cefdf72b1d90085a",
"metadata": {
"collapsed": false
},
"source": [
"## Chaining\n",
"\n",
"For additional capabilities you can use chaining."
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "167bc806-395e-46bf-80cc-3c5d43164f42",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"So, the president talked about how the COVID-19 sickness has affected a lot of people in the country. He said that it's important for everyone to work together to fight the sickness, no matter what political party they are in. The president also mentioned that they are working hard to give vaccines to people to help protect them from getting sick. They are also giving money and help to people who need it, like food, housing, and cheaper health insurance. The president also said that they are sending vaccines to many other countries to help people all around the world stay healthy.\n"
]
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai.chat_models import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(temperature=0)\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that explains the stuff to a five year old. Vectara is providing the answer.\",\n",
"        ),\n",
"        (\"human\", \"{vectara_response}\"),\n",
"    ]\n",
")\n",
"\n",
"\n",
"def get_vectara_response(question: dict) -> str:\n",
"    \"\"\"\n",
"    Calls Vectara as_chat and returns the answer string. This encapsulates\n",
"    the Vectara call.\n",
"    \"\"\"\n",
"    try:\n",
"        response = bot.invoke(question[\"question\"])\n",
"        return response[\"answer\"]\n",
"    except Exception:\n",
"        return \"I'm sorry, I couldn't get an answer from Vectara.\"\n",
"\n",
"\n",
"# Create the chain\n",
"chain = get_vectara_response | prompt | llm | StrOutputParser()\n",
"\n",
"\n",
"# Invoke the chain\n",
"result = chain.invoke({\"question\": \"what did he say about the covid?\"})\n",
"print(result)"
]
},
{
"cell_type": "markdown",
"id": "3b8bb761-db4a-436c-8939-41e9f8652083",
"metadata": {
"collapsed": false
},
"source": [
"## API reference\n",
"\n",
"You can look at the [Chat](https://docs.vectara.com/docs/api-reference/chat-apis/chat-apis-overview) documentation for the details."
]
}
],
"metadata": {
@@ -262,7 +371,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
"version": "3.12.0"
}
},
"nbformat": 4,
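To summarize the migration this Vectara diff performs: the customer ID and corpus ID pair gives way to a single API key plus a per-query `corpus_key`. A hedged before/after sketch, using only names that appear in the diff above:

```python
# After this diff: only the API key is passed at construction time;
# the corpus is selected per query via CorpusConfig(corpus_key=...).
# Sketch based solely on names appearing in the diff above.
import os

from langchain_vectara import Vectara
from langchain_vectara.vectorstores import CorpusConfig, SearchConfig

vectara = Vectara(vectara_api_key=os.environ["VECTARA_API_KEY"])

search_config = SearchConfig(
    corpora=[CorpusConfig(corpus_key=os.environ["VECTARA_CORPUS_KEY"], limit=25)],
)
```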
@@ -1,244 +1,242 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "eb65deaa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: vLLM Chat\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f82e243-f4ee-44e2-b417-099b6401ae3e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# vLLM Chat\n",
|
||||
"\n",
|
||||
"vLLM can be deployed as a server that mimics the OpenAI API protocol. This allows vLLM to be used as a drop-in replacement for applications using OpenAI API. This server can be queried in the same format as OpenAI API.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"This will help you getting started with vLLM [chat models](/docs/concepts/chat_models), which leverage the `langchain-openai` package. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html).\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatOpenAI](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [langchain_openai](https://python.langchain.com/api_reference/openai/) | ✅ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"Specific model features-- such as tool calling, support for multi-modal inputs, support for token-level streaming, etc.-- will depend on the hosted model.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"See the vLLM docs [here](https://docs.vllm.ai/en/latest/).\n",
|
||||
"\n",
|
||||
"To access vLLM models through LangChain, you'll need to install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Authentication will depend on specifics of the inference server."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3b1707a-cf2c-4367-94e3-436c43402503",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1e40bd5e-cbaa-41ef-aaf9-0858eb207184",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0739b647-609b-46d3-bdd3-e86fe4463288",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain vLLM integration can be accessed via the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7afcfbdc-56aa-4529-825a-8acbe7aa5241",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2cf576d6-7b67-4937-bf99-39071e85720c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "060a2e3d-d42f-4221-bd09-a9a06544dcd3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.prompts.chat import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "bf24d732-68a9-44fd-b05d-4903ce5620c6",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inference_server_url = \"http://localhost:8000/v1\"\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"mosaicml/mpt-7b\",\n",
|
||||
" openai_api_key=\"EMPTY\",\n",
|
||||
" openai_api_base=inference_server_url,\n",
|
||||
" max_tokens=5,\n",
|
||||
" temperature=0,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "34b18328-5e8b-4ff2-9b89-6fbb76b5c7f0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "aea4e363-5688-4b07-82ed-6aa8153c2377",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' Io amo programmare', additional_kwargs={}, example=False)"
|
||||
"cell_type": "raw",
|
||||
"id": "eb65deaa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: vLLM Chat\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f82e243-f4ee-44e2-b417-099b6401ae3e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# vLLM Chat\n",
|
||||
"\n",
|
||||
"vLLM can be deployed as a server that mimics the OpenAI API protocol. This allows vLLM to be used as a drop-in replacement for applications using OpenAI API. This server can be queried in the same format as OpenAI API.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"This will help you getting started with vLLM [chat models](/docs/concepts/chat_models), which leverage the `langchain-openai` package. For detailed documentation of all `ChatOpenAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html).\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| [ChatOpenAI](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [langchain_openai](https://python.langchain.com/api_reference/openai/) | ✅ | beta | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"Specific model features-- such as tool calling, support for multi-modal inputs, support for token-level streaming, etc.-- will depend on the hosted model.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"See the vLLM docs [here](https://docs.vllm.ai/en/latest/).\n",
|
||||
"\n",
|
||||
"To access vLLM models through LangChain, you'll need to install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Authentication will depend on specifics of the inference server."
|
||||
]
|
||||
},
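{
"cell_type": "markdown",
"id": "vllm-server-sketch-md",
"metadata": {},
"source": [
"Before running the cells below, you need a vLLM server exposing the OpenAI-compatible API. As a minimal sketch (assuming vLLM is installed locally; the model name and port are placeholders you should adapt), you could launch one from a separate terminal:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "vllm-server-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Launch an OpenAI-compatible vLLM server (run in a separate terminal).\n",
"# The model name and port below are placeholders; adapt them to your setup.\n",
"# !python -m vllm.entrypoints.openai.api_server --model mosaicml/mpt-7b --port 8000"
]
},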
{
"cell_type": "markdown",
"id": "c3b1707a-cf2c-4367-94e3-436c43402503",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "1e40bd5e-cbaa-41ef-aaf9-0858eb207184",
"metadata": {},
"outputs": [],
"source": [
"# import getpass\n",
"# import os\n",
"\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
"cell_type": "markdown",
"id": "0739b647-609b-46d3-bdd3-e86fe4463288",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain vLLM integration can be accessed via the `langchain-openai` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7afcfbdc-56aa-4529-825a-8acbe7aa5241",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "2cf576d6-7b67-4937-bf99-39071e85720c",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "060a2e3d-d42f-4221-bd09-a9a06544dcd3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "bf24d732-68a9-44fd-b05d-4903ce5620c6",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"inference_server_url = \"http://localhost:8000/v1\"\n",
"\n",
"llm = ChatOpenAI(\n",
"    model=\"mosaicml/mpt-7b\",\n",
"    openai_api_key=\"EMPTY\",\n",
"    openai_api_base=inference_server_url,\n",
"    max_tokens=5,\n",
"    temperature=0,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "34b18328-5e8b-4ff2-9b89-6fbb76b5c7f0",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "aea4e363-5688-4b07-82ed-6aa8153c2377",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=' Io amo programmare', additional_kwargs={}, example=False)"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    SystemMessage(\n",
"        content=\"You are a helpful assistant that translates English to Italian.\"\n",
"    ),\n",
"    HumanMessage(\n",
"        content=\"Translate the following sentence from English to Italian: I love programming.\"\n",
"    ),\n",
"]\n",
"llm.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "a580a1e4-11a3-4277-bfba-bfb414ac7201",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dd0f4043-48bd-4245-8bdb-e7669666a277",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "265f5d51-0a76-4808-8d13-ef598ee6e366",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all features and configurations exposed via `langchain-openai`, head to the API reference: https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html\n",
"\n",
"Refer to the vLLM [documentation](https://docs.vllm.ai/en/latest/) as well."
]
}
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant that translates English to Italian.\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Translate the following sentence from English to Italian: I love programming.\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"llm.invoke(messages)"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a580a1e4-11a3-4277-bfba-bfb414ac7201",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dd0f4043-48bd-4245-8bdb-e7669666a277",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "265f5d51-0a76-4808-8d13-ef598ee6e366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all features and configurations exposed via `langchain-openai`, head to the API reference: https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html\n",
|
||||
"\n",
|
||||
"Refer to the vLLM [documentation](https://docs.vllm.ai/en/latest/) as well."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,332 +1,330 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: xAI\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatXAI\n",
"\n",
"\n",
"This page will help you get started with xAI [chat models](../../concepts/chat_models.mdx). For detailed documentation of all `ChatXAI` features and configurations head to the [API reference](https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html).\n",
"\n",
"[xAI](https://console.x.ai/) offers an API to interact with Grok models.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/xai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatXAI](https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html) | [langchain-xai](https://python.langchain.com/api_reference/xai/index.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-xai?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-xai?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](../../how_to/tool_calling.ipynb) | [Structured output](../../how_to/structured_output.ipynb) | JSON mode | [Image input](../../how_to/multimodal_inputs.ipynb) | Audio input | Video input | [Token-level streaming](../../how_to/chat_streaming.ipynb) | Native async | [Token usage](../../how_to/chat_token_usage_tracking.ipynb) | [Logprobs](../../how_to/logprobs.ipynb) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ✅ | ✅ |\n",
"\n",
"## Setup\n",
"\n",
"To access xAI models you'll need to create an xAI account, get an API key, and install the `langchain-xai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [this page](https://console.x.ai/) to sign up for xAI and generate an API key. Once you've done this, set the `XAI_API_KEY` environment variable:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if \"XAI_API_KEY\" not in os.environ:\n",
"    os.environ[\"XAI_API_KEY\"] = getpass.getpass(\"Enter your xAI API key: \")"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": 2,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain xAI integration lives in the `langchain-xai` package:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-xai"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_xai import ChatXAI\n",
"\n",
"llm = ChatXAI(\n",
"    model=\"grok-beta\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    timeout=None,\n",
"    max_retries=2,\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 30, 'total_tokens': 36, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'stop', 'logprobs': None}, id='run-adffb7a3-e48a-4f52-b694-340d85abe5c3-0', usage_metadata={'input_tokens': 30, 'output_tokens': 6, 'total_tokens': 36, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore programmer.\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](../../how_to/sequence.ipynb) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 25, 'total_tokens': 32, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'stop', 'logprobs': None}, id='run-569fc8dc-101b-4e6d-864e-d4fa80df2b63-0', usage_metadata={'input_tokens': 25, 'output_tokens': 7, 'total_tokens': 32, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e074bce1-0994-4b83-b393-ae7aa7e21750",
"metadata": {},
"source": [
"## Tool calling\n",
"\n",
"ChatXAI has a [tool calling](https://docs.x.ai/docs#capabilities) (we use \"tool calling\" and \"function calling\" interchangeably here) API that lets you describe tools and their arguments and have the model return a JSON object with a tool to invoke and the inputs to that tool. Tool calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n",
"\n",
"### ChatXAI.bind_tools()\n",
"\n",
"With `ChatXAI.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to OpenAI tool schemas, which look like:\n",
"```\n",
"{\n",
"    \"name\": \"...\",\n",
"    \"description\": \"...\",\n",
"    \"parameters\": {...}  # JSONSchema\n",
"}\n",
"```\n",
"and are passed in every model invocation."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c6bfe929-ec02-46bd-9d54-76350edddabc",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
"    \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
"    location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm_with_tools = llm.bind_tools([GetWeather])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "5265c892-d8c2-48af-aef5-adbee1647ba6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='I am retrieving the current weather for San Francisco.', additional_kwargs={'tool_calls': [{'id': '0', 'function': {'arguments': '{\"location\":\"San Francisco, CA\"}', 'name': 'GetWeather'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 151, 'total_tokens': 162, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'grok-beta', 'system_fingerprint': 'fp_14b89b2dfc', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-73707da7-afec-4a52-bee1-a176b0ab8585-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': '0', 'type': 'tool_call'}], usage_metadata={'input_tokens': 151, 'output_tokens': 11, 'total_tokens': 162, 'input_token_details': {}, 'output_token_details': {}})"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg = llm_with_tools.invoke(\n",
"    \"what is the weather like in San Francisco\",\n",
")\n",
"ai_msg"
]
},
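{
"cell_type": "markdown",
"id": "tool-call-handling-note",
"metadata": {},
"source": [
"The message above contains `tool_calls` rather than a final answer. As a minimal sketch (the weather string below is a stand-in for a real lookup), you can run the requested tool yourself and pass the result back as a `ToolMessage` so the model can finish answering:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "tool-call-handling-sketch",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage, ToolMessage\n",
"\n",
"query = \"what is the weather like in San Francisco\"\n",
"messages = [HumanMessage(query), ai_msg]\n",
"for tool_call in ai_msg.tool_calls:\n",
"    # Placeholder result; a real app would call a weather API here.\n",
"    result = f\"It is sunny in {tool_call['args']['location']}.\"\n",
"    messages.append(ToolMessage(content=result, tool_call_id=tool_call[\"id\"]))\n",
"\n",
"llm_with_tools.invoke(messages)"
]
},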
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `ChatXAI` features and configurations head to the API reference: https://python.langchain.com/api_reference/xai/chat_models/langchain_xai.chat_models.ChatXAI.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,229 +1,227 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# ChatYi\n",
"\n",
"This will help you get started with Yi [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatYi features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.yi.ChatYi.html).\n",
"\n",
"[01.AI](https://www.lingyiwanwu.com/en), founded by Dr. Kai-Fu Lee, is a global company at the forefront of AI 2.0. They offer cutting-edge large language models, including the Yi series, which range from 6B to hundreds of billions of parameters. 01.AI also provides multimodal models, an open API platform, and open-source options like Yi-34B/9B/6B and Yi-VL.\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatYi](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.yi.ChatYi.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain_community?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain_community?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access ChatYi models you'll need to create an 01.AI account, get an API key, and install the `langchain_community` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to [01.AI](https://platform.01.ai) to sign up for 01.AI and generate an API key. Once you've done this, set the `YI_API_KEY` environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if \"YI_API_KEY\" not in os.environ:\n",
"    os.environ[\"YI_API_KEY\"] = getpass.getpass(\"Enter your Yi API key: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Yi integration lives in the `langchain_community` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_community"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models.yi import ChatYi\n",
"\n",
"llm = ChatYi(\n",
"    model=\"yi-large\",\n",
"    temperature=0,\n",
"    timeout=60,\n",
"    yi_api_base=\"https://api.01.ai/v1/chat/completions\",\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Invocation\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Large Language Models (LLMs) have the potential to significantly impact healthcare by enhancing various aspects of patient care, research, and administrative processes. Here are some potential applications:\\n\\n1. **Clinical Documentation and Reporting**: LLMs can assist in generating patient reports and documentation by understanding and summarizing clinical notes, making the process more efficient and reducing the administrative burden on healthcare professionals.\\n\\n2. **Medical Coding and Billing**: These models can help in automating the coding process for medical billing by accurately translating clinical notes into standardized codes, reducing errors and improving billing efficiency.\\n\\n3. **Clinical Decision Support**: LLMs can analyze patient data and medical literature to provide evidence-based recommendations to healthcare providers, aiding in diagnosis and treatment planning.\\n\\n4. **Patient Education and Communication**: By simplifying medical jargon, LLMs can help in educating patients about their conditions, treatment options, and preventive care, improving patient engagement and health literacy.\\n\\n5. **Natural Language Processing (NLP) for EHRs**: LLMs can enhance NLP capabilities in Electronic Health Records (EHRs) systems, enabling better extraction of information from unstructured data, such as clinical notes, to support data-driven decision-making.\\n\\n6. **Drug Discovery and Development**: LLMs can analyze biomedical literature and clinical trial data to identify new drug candidates, predict drug interactions, and support the development of personalized medicine.\\n\\n7. **Telemedicine and Virtual Health Assistants**: Integrated into telemedicine platforms, LLMs can provide preliminary assessments and triage, offering patients basic health advice and determining the urgency of their needs, thus optimizing the utilization of healthcare resources.\\n\\n8. **Research and Literature Review**: LLMs can expedite the process of reviewing medical literature by quickly identifying relevant studies and summarizing findings, accelerating research and evidence-based practice.\\n\\n9. **Personalized Medicine**: By analyzing a patient's genetic information and medical history, LLMs can help in tailoring treatment plans and medication dosages, contributing to the advancement of personalized medicine.\\n\\n10. **Quality Improvement and Risk Assessment**: LLMs can analyze healthcare data to identify patterns that may indicate areas for quality improvement or potential risks, such as hospital-acquired infections or adverse drug events.\\n\\n11. **Mental Health Support**: LLMs can provide mental health support by offering coping strategies, mindfulness exercises, and preliminary assessments, serving as a complement to professional mental health services.\\n\\n12. **Continuing Medical Education (CME)**: LLMs can personalize CME by recommending educational content based on a healthcare provider's practice area, patient demographics, and emerging medical literature, ensuring that professionals stay updated with the latest advancements.\\n\\nWhile the applications of LLMs in healthcare are promising, it's crucial to address challenges such as data privacy, model bias, and the need for regulatory approval to ensure that these technologies are implemented safely and ethically.\", response_metadata={'token_usage': {'completion_tokens': 656, 'prompt_tokens': 40, 'total_tokens': 696}, 'model': 'yi-large'}, id='run-870850bd-e4bf-4265-8730-1736409c0acf-0')"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"messages = [\n",
"    SystemMessage(content=\"You are an AI assistant specializing in technology trends.\"),\n",
"    HumanMessage(\n",
"        content=\"What are the potential applications of large language models in healthcare?\"\n",
"    ),\n",
"]\n",
"\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
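{
"cell_type": "markdown",
"metadata": {},
"source": [
"Token-level streaming is supported (see the features table above), so you can also stream the reply as it is generated; a minimal sketch reusing the `messages` list from the previous cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Print each chunk of the reply as it arrives.\n",
"for chunk in llm.stream(messages):\n",
"    print(chunk.content, end=\"\", flush=True)"
]
},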
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe das Programmieren.', response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 33, 'total_tokens': 41}, 'model': 'yi-large'}, id='run-daa3bc58-8289-4d72-a24e-80622fa90d6d-0')"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatYi features and configurations head to the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.yi.ChatYi.html"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatYi features and configurations head to the API reference: https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.yi.ChatYi.html"
|
||||
]
|
||||
}
|
||||
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
@@ -1,466 +1,464 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Box\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# BoxLoader and BoxBlobLoader\n",
"\n",
"\n",
"The `langchain-box` package provides two methods to index your files from Box: `BoxLoader` and `BoxBlobLoader`. `BoxLoader` allows you to ingest files that have a text representation in Box. The `BoxBlobLoader` allows you to download the blob for any document or image file, for processing with the blob parser of your choice.\n",
"\n",
"This notebook details getting started with both of these. For detailed documentation of all `BoxLoader` and `BoxBlobLoader` features and configurations, head to the API Reference pages for [BoxLoader](https://python.langchain.com/api_reference/box/document_loaders/langchain_box.document_loaders.box.BoxLoader.html) and [BoxBlobLoader](https://python.langchain.com/api_reference/box/document_loaders/langchain_box.blob_loaders.box.BoxBlobLoader.html).\n",
"\n",
"## Overview\n",
"\n",
"The `BoxLoader` class helps you get your unstructured content from Box in LangChain's `Document` format. You can do this with either a `List[str]` containing Box file IDs, or with a `str` containing a Box folder ID.\n",
"\n",
"The `BoxBlobLoader` class helps you get your unstructured content from Box in LangChain's `Blob` format. You can do this with a `List[str]` containing Box file IDs, a `str` containing a Box folder ID, a search query, or a `BoxMetadataQuery`.\n",
"\n",
"If getting files from a folder with a folder ID, you can also set a `Bool` to tell the loader to get all sub-folders in that folder as well.\n",
"\n",
":::info\n",
"A Box instance can contain petabytes of files, and folders can contain millions of files. Be intentional about which folders you choose to index, and we recommend never getting all files from folder 0 recursively. Folder ID 0 is your root folder.\n",
":::\n",
"\n",
"The `BoxLoader` will skip files without a text representation, while the `BoxBlobLoader` will return blobs for all document and image files.\n",
"\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | JS support|\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [BoxLoader](https://python.langchain.com/api_reference/box/document_loaders/langchain_box.document_loaders.box.BoxLoader.html) | [langchain_box](https://python.langchain.com/api_reference/box/index.html) | ✅ | ❌ | ❌ |\n",
"| [BoxBlobLoader](https://python.langchain.com/api_reference/box/document_loaders/langchain_box.blob_loaders.box.BoxBlobLoader.html) | [langchain_box](https://python.langchain.com/api_reference/box/index.html) | ✅ | ❌ | ❌ |\n",
"\n",
"### Loader features\n",
"\n",
"| Source | Document Lazy Loading | Async Support |\n",
"| :---: | :---: | :---: |\n",
"| BoxLoader | ✅ | ❌ |\n",
"| BoxBlobLoader | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"In order to use the Box package, you will need a few things:\n",
"\n",
"* A Box account — If you are not a current Box customer or want to test outside of your production Box instance, you can use a [free developer account](https://account.box.com/signup/n/developer#ty9l3).\n",
"* [A Box app](https://developer.box.com/guides/getting-started/first-application/) — This is configured in the [developer console](https://account.box.com/developers/console), and for Box AI, must have the `Manage AI` scope enabled. Here you will also select your authentication method.\n",
"* The app must be [enabled by the administrator](https://developer.box.com/guides/authorization/custom-app-approval/#manual-approval). For free developer accounts, this is whoever signed up for the account.\n",
"\n",
"### Credentials\n",
"\n",
"For these examples, we will use [token authentication](https://developer.box.com/guides/authentication/tokens/developer-tokens). This works with any [authentication method](https://developer.box.com/guides/authentication/) — just obtain the token however you prefer. If you want to learn more about how to use other authentication types with `langchain-box`, visit the [Box provider](/docs/integrations/providers/box) document.\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
"Enter your Box Developer Token: ········\n"
]
}
],
"source": [
"import getpass\n",
"import os\n",
"\n",
"box_developer_token = getpass.getpass(\"Enter your Box Developer Token: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"Install **langchain_box**."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_box"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialization\n",
"\n",
"### Load files\n",
"\n",
"If you wish to load files, you must provide the `List` of file IDs at instantiation time.\n",
"\n",
"This requires 1 piece of information:\n",
"\n",
"* **box_file_ids** (`List[str]`) - A list of Box file IDs.\n",
"\n",
"#### BoxLoader"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_box.document_loaders import BoxLoader\n",
"\n",
"box_file_ids = [\"1514555423624\", \"1514553902288\"]\n",
"\n",
"loader = BoxLoader(\n",
" box_developer_token=box_developer_token,\n",
" box_file_ids=box_file_ids,\n",
" character_limit=10000, # Optional. Defaults to no limit\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### BoxBlobLoader"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from langchain_box.blob_loaders import BoxBlobLoader\n",
"\n",
"box_file_ids = [\"1514555423624\", \"1514553902288\"]\n",
"\n",
"loader = BoxBlobLoader(\n",
" box_developer_token=box_developer_token, box_file_ids=box_file_ids\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load from folder\n",
"\n",
"If you wish to load files from a folder, you must provide a `str` with the Box folder ID at instantiation time.\n",
"\n",
"This requires 1 piece of information:\n",
"\n",
"* **box_folder_id** (`str`) - A string containing a Box folder ID.\n",
"\n",
"#### BoxLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_box.document_loaders import BoxLoader\n",
"\n",
"box_folder_id = \"260932470532\"\n",
"\n",
"loader = BoxLoader(\n",
" box_folder_id=box_folder_id,\n",
" recursive=False, # Optional. Return the entire tree; defaults to False\n",
" character_limit=10000, # Optional. Defaults to no limit\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### BoxBlobLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_box.blob_loaders import BoxBlobLoader\n",
"\n",
"box_folder_id = \"260932470532\"\n",
"\n",
"loader = BoxBlobLoader(\n",
" box_folder_id=box_folder_id,\n",
" recursive=False, # Optional. Return the entire tree; defaults to False\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Search for files with BoxBlobLoader\n",
"\n",
"If you need to search for files, the `BoxBlobLoader` offers two methods. First, you can perform a full-text search with optional search options to narrow down that search.\n",
"\n",
"This requires 1 piece of information:\n",
"\n",
"* **query** (`str`) - A string containing the search query to perform.\n",
"\n",
"You can also provide a `BoxSearchOptions` object to narrow down that search:\n",
"\n",
"* **box_search_options** (`BoxSearchOptions`)\n",
"\n",
"#### BoxBlobLoader search"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_box.blob_loaders import BoxBlobLoader\n",
"from langchain_box.utilities import BoxSearchOptions, DocumentFiles, SearchTypeFilter\n",
"\n",
"box_folder_id = \"260932470532\"\n",
"\n",
"box_search_options = BoxSearchOptions(\n",
" ancestor_folder_ids=[box_folder_id],\n",
" search_type_filter=[SearchTypeFilter.FILE_CONTENT],\n",
" created_date_range=[\"2023-01-01T00:00:00-07:00\", \"2024-08-01T00:00:00-07:00\"],\n",
" file_extensions=[DocumentFiles.DOCX, DocumentFiles.PDF],\n",
" k=200,\n",
" size_range=[1, 1000000],\n",
" updated_data_range=None,\n",
")\n",
"\n",
"loader = BoxBlobLoader(\n",
" box_developer_token=box_developer_token,\n",
" query=\"Victor\",\n",
" box_search_options=box_search_options,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also search for content based on Box Metadata. If your Box instance uses Metadata, you can search for any documents that have a specific Metadata Template attached and that meet certain criteria, like returning any invoices with a total greater than or equal to $500 that were created last quarter.\n",
"\n",
"This requires 1 piece of information:\n",
"\n",
"* **box_metadata_query** (`BoxMetadataQuery`) - A metadata query specifying the template, the query expression and its parameters, and the ancestor folder to search in.\n",
"\n",
"#### BoxBlobLoader Metadata query"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_box.blob_loaders import BoxBlobLoader\n",
"from langchain_box.utilities import BoxMetadataQuery\n",
"\n",
"query = BoxMetadataQuery(\n",
" template_key=\"enterprise_1234.myTemplate\",\n",
" query=\"total >= :value\",\n",
" query_params={\"value\": 100},\n",
" ancestor_folder_id=\"260932470532\",\n",
")\n",
"\n",
"loader = BoxBlobLoader(box_metadata_query=query)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load\n",
"\n",
"#### BoxLoader"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'source': 'https://dl.boxcloud.com/api/2.0/internal_files/1514555423624/versions/1663171610024/representations/extracted_text/content/', 'title': 'Invoice-A5555_txt'}, page_content='Vendor: AstroTech Solutions\\nInvoice Number: A5555\\n\\nLine Items:\\n - Gravitational Wave Detector Kit: $800\\n - Exoplanet Terrarium: $120\\nTotal: $920')"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs = loader.load()\n",
"docs[0]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'source': 'https://dl.boxcloud.com/api/2.0/internal_files/1514555423624/versions/1663171610024/representations/extracted_text/content/', 'title': 'Invoice-A5555_txt'}\n"
]
}
],
"source": [
"print(docs[0].metadata)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### BoxBlobLoader"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Blob(id='1514555423624' metadata={'source': 'https://app.box.com/0/260935730128/260931903795/Invoice-A5555.txt', 'name': 'Invoice-A5555.txt', 'file_size': 150} data=\"b'Vendor: AstroTech Solutions\\\\nInvoice Number: A5555\\\\n\\\\nLine Items:\\\\n - Gravitational Wave Detector Kit: $800\\\\n - Exoplanet Terrarium: $120\\\\nTotal: $920'\" mimetype='text/plain' path='https://app.box.com/0/260935730128/260931903795/Invoice-A5555.txt')\n",
"Blob(id='1514553902288' metadata={'source': 'https://app.box.com/0/260935730128/260931903795/Invoice-B1234.txt', 'name': 'Invoice-B1234.txt', 'file_size': 168} data=\"b'Vendor: Galactic Gizmos Inc.\\\\nInvoice Number: B1234\\\\nPurchase Order Number: 001\\\\nLine Items:\\\\n - Quantum Flux Capacitor: $500\\\\n - Anti-Gravity Pen Set: $75\\\\nTotal: $575'\" mimetype='text/plain' path='https://app.box.com/0/260935730128/260931903795/Invoice-B1234.txt')\n"
]
}
],
"source": [
"for blob in loader.yield_blobs():\n",
" print(f\"Blob({blob})\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Lazy Load\n",
"\n",
"#### BoxLoader only"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"page = []\n",
"for doc in loader.lazy_load():\n",
" page.append(doc)\n",
" if len(page) >= 10:\n",
" # do some paged operation, e.g.\n",
" # index.upsert(page)\n",
"\n",
" page = []"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Extra fields\n",
"\n",
"All Box connectors offer the ability to select additional fields from the Box `FileFull` object to return as custom LangChain metadata. Each object accepts an optional `List[str]` called `extra_fields` containing the JSON key from the return object, like `extra_fields=[\"shared_link\"]`.\n",
"\n",
"The connector will add this field to the list of fields the integration needs to function and then add the results to the metadata returned in the `Document` or `Blob`, like `\"metadata\" : { \"source\" : \"source\", \"shared_link\" : \"shared_link\" }`. If the field is unavailable for that file, it will be returned as an empty string, like `\"shared_link\" : \"\"`. A minimal sketch follows."
]
},
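{
"cell_type": "markdown",
"metadata": {},
"source": [
"This sketch reuses the `box_developer_token` and `box_file_ids` from above, requests the `shared_link` field, and reads it back from the returned metadata:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_box.document_loaders import BoxLoader\n",
"\n",
"# extra_fields lists FileFull JSON keys; each requested key is merged into\n",
"# the returned metadata, or \"\" if it is unavailable for a given file.\n",
"loader = BoxLoader(\n",
" box_developer_token=box_developer_token,\n",
" box_file_ids=box_file_ids,\n",
" extra_fields=[\"shared_link\"],\n",
")\n",
"\n",
"docs = loader.load()\n",
"print(docs[0].metadata.get(\"shared_link\", \"\"))"
]
},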
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all BoxLoader features and configurations head to the [API reference](https://python.langchain.com/api_reference/box/document_loaders/langchain_box.document_loaders.box.BoxLoader.html).\n",
"\n",
"\n",
"## Help\n",
"\n",
"If you have questions, you can check out our [developer documentation](https://developer.box.com) or reach out to us in our [developer community](https://community.box.com)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -1,243 +1,241 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# BSHTMLLoader\n",
"\n",
"\n",
"This notebook provides a quick overview for getting started with the BeautifulSoup4 [document loader](https://python.langchain.com/docs/concepts/document_loaders). For detailed documentation of all BSHTMLLoader features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html).\n",
"\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"\n",
"| Class | Package | Local | Serializable | JS support|\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [BSHTMLLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ |\n",
"### Loader features\n",
"| Source | Document Lazy Loading | Native Async Support\n",
"| :---: | :---: | :---: |\n",
"| BSHTMLLoader | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access the BSHTMLLoader document loader you'll need to install the `langchain-community` integration package and the `bs4` Python package.\n",
"\n",
"### Credentials\n",
"\n",
"No credentials are needed to use the `BSHTMLLoader` class."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"Install **langchain_community** and **bs4**."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_community bs4"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialization\n",
"\n",
"Now we can instantiate our loader and load documents:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import BSHTMLLoader\n",
"\n",
"loader = BSHTMLLoader(\n",
" file_path=\"./example_data/fake-content.html\",\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'source': './example_data/fake-content.html', 'title': 'Test Title'}, page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n')"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs = loader.load()\n",
"docs[0]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'source': './example_data/fake-content.html', 'title': 'Test Title'}\n"
]
}
],
"source": [
"print(docs[0].metadata)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Lazy Load"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'source': './example_data/fake-content.html', 'title': 'Test Title'}, page_content='\\nTest Title\\n\\n\\nMy First Heading\\nMy first paragraph.\\n\\n\\n')"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"page = []\n",
"for doc in loader.lazy_load():\n",
" page.append(doc)\n",
" if len(page) >= 10:\n",
" # do some paged operation, e.g.\n",
" # index.upsert(page)\n",
"\n",
" page = []\n",
"page[0]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Adding separator to BS4\n",
"\n",
"We can also pass a separator to use when calling `get_text` on the soup:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='\n",
", Test Title, \n",
", \n",
", \n",
", My First Heading, \n",
", My first paragraph., \n",
", \n",
", \n",
"' metadata={'source': './example_data/fake-content.html', 'title': 'Test Title'}\n"
]
}
],
"source": [
"loader = BSHTMLLoader(\n",
" file_path=\"./example_data/fake-content.html\", get_text_separator=\", \"\n",
")\n",
"\n",
"docs = loader.load()\n",
"print(docs[0])"
]
},
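{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough sketch (the exact keyword names are worth double-checking against the API reference), `BSHTMLLoader` also accepts `open_encoding` for files that are not UTF-8 and `bs_kwargs` for keyword arguments passed through to the `BeautifulSoup` constructor:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# open_encoding is handed to open(); bs_kwargs is forwarded to BeautifulSoup.\n",
"loader = BSHTMLLoader(\n",
" file_path=\"./example_data/fake-content.html\",\n",
" open_encoding=\"utf-8\",\n",
" bs_kwargs={\"features\": \"html.parser\"},\n",
")\n",
"\n",
"docs = loader.load()\n",
"print(docs[0].metadata[\"title\"])"
]
},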
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all BSHTMLLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -1,348 +1,346 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# JSONLoader\n",
"\n",
"This notebook provides a quick overview for getting started with the JSON [document loader](https://python.langchain.com/docs/concepts/document_loaders). For detailed documentation of all JSONLoader features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/file_loaders/json/)|\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [JSONLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ |\n",
"### Loader features\n",
"| Source | Document Lazy Loading | Native Async Support\n",
"| :---: | :---: | :---: |\n",
"| JSONLoader | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access the JSON document loader you'll need to install the `langchain-community` integration package as well as the `jq` Python package.\n",
"\n",
"### Credentials\n",
"\n",
"No credentials are required to use the `JSONLoader` class."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"Install **langchain_community** and **jq**:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_community jq"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Initialization\n",
"\n",
"Now we can instantiate our loader and load documents:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import JSONLoader\n",
"\n",
"loader = JSONLoader(\n",
" file_path=\"./example_data/facebook_chat.json\",\n",
" jq_schema=\".messages[].content\",\n",
" text_content=False,\n",
")"
]
},
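{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here `jq_schema=\".messages[].content\"` selects the `content` field of every object in the file's top-level `messages` array, producing one `Document` per value. As a rough plain-Python sketch of what the jq filter extracts (assuming that file layout):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"with open(\"./example_data/facebook_chat.json\") as f:\n",
" chat = json.load(f)\n",
"\n",
"# Equivalent of the jq filter .messages[].content\n",
"contents = [m.get(\"content\") for m in chat[\"messages\"]]\n",
"print(contents[:3])"
]
},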
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1}, page_content='Bye!')"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs = loader.load()\n",
"docs[0]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1}\n"
]
}
],
"source": [
"print(docs[0].metadata)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Lazy Load"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"pages = []\n",
"for doc in loader.lazy_load():\n",
" pages.append(doc)\n",
" if len(pages) >= 10:\n",
" # do some paged operation, e.g.\n",
" # index.upsert(pages)\n",
"\n",
" pages = []"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Read from JSON Lines file\n",
"\n",
"If you want to load documents from a JSON Lines file, you pass `json_lines=True`\n",
"and specify `jq_schema` to extract `page_content` from a single JSON object."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='Bye!' metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}\n"
]
}
],
"source": [
"loader = JSONLoader(\n",
" file_path=\"./example_data/facebook_chat_messages.jsonl\",\n",
" jq_schema=\".content\",\n",
" text_content=False,\n",
" json_lines=True,\n",
")\n",
"\n",
"docs = loader.load()\n",
"print(docs[0])"
]
},
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Read specific content keys\n",
|
||||
"\n",
|
||||
"Another option is to set `jq_schema='.'` and provide a `content_key` in order to only load specific content:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='User 2' metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = JSONLoader(\n",
|
||||
" file_path=\"./example_data/facebook_chat_messages.jsonl\",\n",
|
||||
" jq_schema=\".\",\n",
|
||||
" content_key=\"sender_name\",\n",
|
||||
" json_lines=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## JSON file with jq schema `content_key`\n",
|
||||
"\n",
|
||||
"To load documents from a JSON file using the `content_key` within the jq schema, set `is_content_key_jq_parsable=True`. Ensure that `content_key` is compatible and can be parsed using the jq schema."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **jq**:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='Bye!' metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = JSONLoader(\n",
|
||||
" file_path=\"./example_data/facebook_chat.json\",\n",
|
||||
" jq_schema=\".messages[]\",\n",
|
||||
" content_key=\".content\",\n",
|
||||
" is_content_key_jq_parsable=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extracting metadata\n",
|
||||
"\n",
|
||||
"Generally, we want to include metadata available in the JSON file into the documents that we create from the content.\n",
|
||||
"\n",
|
||||
"The following demonstrates how metadata can be extracted using the `JSONLoader`.\n",
|
||||
"\n",
|
||||
"There are some key changes to be noted. In the previous example where we didn't collect the metadata, we managed to directly specify in the schema where the value for the `page_content` can be extracted from.\n",
|
||||
"\n",
|
||||
"In this example, we have to tell the loader to iterate over the records in the `messages` field. The jq_schema then has to be `.messages[]`\n",
|
||||
"\n",
|
||||
"This allows us to pass the records (dict) into the `metadata_func` that has to be implemented. The `metadata_func` is responsible for identifying which pieces of information in the record should be included in the metadata stored in the final `Document` object.\n",
|
||||
"\n",
|
||||
"Additionally, we now have to explicitly specify in the loader, via the `content_key` argument, the key from the record where the value for the `page_content` needs to be extracted from."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community jq "
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and load documents:\n",
|
||||
"\n",
|
||||
"- TODO: Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import JSONLoader\n",
|
||||
"\n",
|
||||
"loader = JSONLoader(\n",
|
||||
" file_path=\"./example_data/facebook_chat.json\",\n",
|
||||
" jq_schema=\".messages[].content\",\n",
|
||||
" text_content=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
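||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For orientation, `facebook_chat.json` is assumed to look roughly like this (abridged):\n",
|
||||
"\n",
|
||||
"```json\n",
|
||||
"{\"participants\": [...], \"messages\": [{\"sender_name\": \"User 2\", \"timestamp_ms\": 1675597571851, \"content\": \"Bye!\"}, ...]}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"so `jq_schema=\".messages[].content\"` selects the `content` value of every record under `messages`."
|
||||
]
|
||||
},
|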
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1}, page_content='Bye!')"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
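||||
"# lazy_load() yields Documents one at a time instead of building the full list\n",
|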
||||
"pages = []\n",
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" pages.append(doc)\n",
|
||||
" if len(pages) >= 10:\n",
|
||||
" # do some paged operation, e.g.\n",
|
||||
" # index.upsert(pages)\n",
|
||||
"\n",
|
||||
" pages = []"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Read from JSON Lines file\n",
|
||||
"\n",
|
||||
"If you want to load documents from a JSON Lines file, you pass `json_lines=True`\n",
|
||||
"and specify `jq_schema` to extract `page_content` from a single JSON object."
|
||||
]
|
||||
},
|
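||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "Each line of `facebook_chat_messages.jsonl` is assumed to be a standalone JSON object, e.g. `{\"sender_name\": \"User 2\", \"timestamp_ms\": 1675597571851, \"content\": \"Bye!\"}`, and the `jq_schema` is applied to each line independently."
|
||||
},
|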
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='Bye!' metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = JSONLoader(\n",
|
||||
" file_path=\"./example_data/facebook_chat_messages.jsonl\",\n",
|
||||
" jq_schema=\".content\",\n",
|
||||
" text_content=False,\n",
|
||||
" json_lines=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Read specific content keys\n",
|
||||
"\n",
|
||||
"Another option is to set `jq_schema='.'` and provide a `content_key` in order to only load specific content:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='User 2' metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat_messages.jsonl', 'seq_num': 1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = JSONLoader(\n",
|
||||
" file_path=\"./example_data/facebook_chat_messages.jsonl\",\n",
|
||||
" jq_schema=\".\",\n",
|
||||
" content_key=\"sender_name\",\n",
|
||||
" json_lines=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## JSON file with jq schema `content_key`\n",
|
||||
"\n",
|
||||
"To load documents from a JSON file using the `content_key` within the jq schema, set `is_content_key_jq_parsable=True`. Ensure that `content_key` is compatible and can be parsed using the jq schema."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"page_content='Bye!' metadata={'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = JSONLoader(\n",
|
||||
" file_path=\"./example_data/facebook_chat.json\",\n",
|
||||
" jq_schema=\".messages[]\",\n",
|
||||
" content_key=\".content\",\n",
|
||||
" is_content_key_jq_parsable=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extracting metadata\n",
|
||||
"\n",
|
||||
"Generally, we want to include metadata available in the JSON file into the documents that we create from the content.\n",
|
||||
"\n",
|
||||
"The following demonstrates how metadata can be extracted using the `JSONLoader`.\n",
|
||||
"\n",
|
||||
"There are some key changes to be noted. In the previous example where we didn't collect the metadata, we managed to directly specify in the schema where the value for the `page_content` can be extracted from.\n",
|
||||
"\n",
|
||||
"In this example, we have to tell the loader to iterate over the records in the `messages` field. The jq_schema then has to be `.messages[]`\n",
|
||||
"\n",
|
||||
"This allows us to pass the records (dict) into the `metadata_func` that has to be implemented. The `metadata_func` is responsible for identifying which pieces of information in the record should be included in the metadata stored in the final `Document` object.\n",
|
||||
"\n",
|
||||
"Additionally, we now have to explicitly specify in the loader, via the `content_key` argument, the key from the record where the value for the `page_content` needs to be extracted from."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'source': '/Users/isaachershenson/Documents/langchain/docs/docs/integrations/document_loaders/example_data/facebook_chat.json', 'seq_num': 1, 'sender_name': 'User 2', 'timestamp_ms': 1675597571851}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Define the metadata extraction function.\n",
|
||||
"def metadata_func(record: dict, metadata: dict) -> dict:\n",
|
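||||
"    # `record` is one object selected by `jq_schema` (here, one message);\n",
|
||||
"    # `metadata` is pre-populated with the default `source` and `seq_num` keys.\n",
|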
||||
" metadata[\"sender_name\"] = record.get(\"sender_name\")\n",
|
||||
" metadata[\"timestamp_ms\"] = record.get(\"timestamp_ms\")\n",
|
||||
"\n",
|
||||
" return metadata\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"loader = JSONLoader(\n",
|
||||
" file_path=\"./example_data/facebook_chat.json\",\n",
|
||||
" jq_schema=\".messages[]\",\n",
|
||||
" content_key=\"content\",\n",
|
||||
" metadata_func=metadata_func,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all JSONLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -1,178 +1,176 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# MathPixPDFLoader\n",
|
||||
"\n",
|
||||
"Inspired by Daniel Gross's snippet here: [https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21](https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21)\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [MathPixPDFLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.MathpixPDFLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
"| MathPixPDFLoader | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Sign up for Mathpix and [create an API key](https://mathpix.com/docs/ocr/creating-an-api-key) to set the `MATHPIX_API_KEY` variables in your environment"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if \"MATHPIX_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"MATHPIX_API_KEY\"] = getpass.getpass(\"Enter your Mathpix API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"Now we are ready to initialize our loader:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import MathpixPDFLoader\n",
|
||||
"\n",
|
||||
"file_path = \"./example_data/layout-parser-paper.pdf\"\n",
|
||||
"loader = MathpixPDFLoader(file_path)"
|
||||
]
|
||||
},
|
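||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "Note that the loader uploads the PDF to the Mathpix OCR API and waits for processing to finish, so loading can take a while for long documents; the recognized text (with math typically rendered as LaTeX) becomes the document content."
|
||||
},
|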
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"page = []\n",
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" page.append(doc)\n",
|
||||
" if len(page) >= 10:\n",
|
||||
" # do some paged operation, e.g.\n",
|
||||
" # index.upsert(page)\n",
|
||||
"\n",
|
||||
" page = []"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all MathpixPDFLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.MathpixPDFLoader.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"No credentials are required to use PyMuPDFLoader"
|
||||
"No credentials are required to use PDFMinerLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -60,7 +60,7 @@
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community** and **pymupdf**."
|
||||
"Install **langchain_community** and **pdfminer**."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -327,7 +327,7 @@
|
||||
"- By page\n",
|
||||
"- As a single text flow\n",
|
||||
"\n",
|
||||
"By default PDFPlumberLoader will split the PDF by page."
|
||||
"By default PDFMinerLoader will split the PDF by page."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,183 +1,181 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PDFPlumber\n",
|
||||
"\n",
|
||||
"Like PyMuPDF, the output Documents contain detailed metadata about the PDF and its pages, and returns one document per page.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PDFPlumberLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PDFPlumberLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
"| PDFPlumberLoader | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"No credentials are needed to use this loader."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and load documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import PDFPlumberLoader\n",
|
||||
"\n",
|
||||
"loader = PDFPlumberLoader(\"./example_data/layout-parser-paper.pdf\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'source': './example_data/layout-parser-paper.pdf', 'file_path': './example_data/layout-parser-paper.pdf', 'page': 0, 'total_pages': 16, 'Author': '', 'CreationDate': 'D:20210622012710Z', 'Creator': 'LaTeX with hyperref', 'Keywords': '', 'ModDate': 'D:20210622012710Z', 'PTEX.Fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'Producer': 'pdfTeX-1.40.21', 'Subject': '', 'Title': '', 'Trapped': 'False'}, page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\nZejiang Shen1 ((cid:0)), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\\nLee4, Jacob Carlson3, and Weining Li5\\n1 Allen Institute for AI\\nshannons@allenai.org\\n2 Brown University\\nruochen zhang@brown.edu\\n3 Harvard University\\n{melissadell,jacob carlson}@fas.harvard.edu\\n4 University of Washington\\nbcgl@cs.washington.edu\\n5 University of Waterloo\\nw422li@uwaterloo.ca\\nAbstract. Recentadvancesindocumentimageanalysis(DIA)havebeen\\nprimarily driven by the application of neural networks. Ideally, research\\noutcomescouldbeeasilydeployedinproductionandextendedforfurther\\ninvestigation. However, various factors like loosely organized codebases\\nand sophisticated model configurations complicate the easy reuse of im-\\nportantinnovationsbyawideaudience.Thoughtherehavebeenon-going\\nefforts to improve reusability and simplify deep learning (DL) model\\ndevelopmentindisciplineslikenaturallanguageprocessingandcomputer\\nvision, none of them are optimized for challenges in the domain of DIA.\\nThis represents a major gap in the existing toolkit, as DIA is central to\\nacademicresearchacross awiderangeof disciplinesinthesocialsciences\\nand humanities. This paper introduces LayoutParser, an open-source\\nlibrary for streamlining the usage of DL in DIA research and applica-\\ntions. The core LayoutParser library comes with a set of simple and\\nintuitiveinterfacesforapplyingandcustomizingDLmodelsforlayoutde-\\ntection,characterrecognition,andmanyotherdocumentprocessingtasks.\\nTo promote extensibility, LayoutParser also incorporates a community\\nplatform for sharing both pre-trained models and full document digiti-\\nzation pipelines. We demonstrate that LayoutParser is helpful for both\\nlightweight and large-scale digitization pipelines in real-word use cases.\\nThe library is publicly available at https://layout-parser.github.io.\\nKeywords: DocumentImageAnalysis·DeepLearning·LayoutAnalysis\\n· Character Recognition · Open Source library · Toolkit.\\n1 Introduction\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndocumentimageanalysis(DIA)tasksincludingdocumentimageclassification[11,\\n1202\\nnuJ\\n12\\n]VC.sc[\\n2v84351.3012:viXra\\n')"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'source': './example_data/layout-parser-paper.pdf', 'file_path': './example_data/layout-parser-paper.pdf', 'page': 0, 'total_pages': 16, 'Author': '', 'CreationDate': 'D:20210622012710Z', 'Creator': 'LaTeX with hyperref', 'Keywords': '', 'ModDate': 'D:20210622012710Z', 'PTEX.Fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'Producer': 'pdfTeX-1.40.21', 'Subject': '', 'Title': '', 'Trapped': 'False'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
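||||
"# lazy_load() yields one Document at a time; batching keeps memory use bounded\n",
|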
||||
"page = []\n",
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" page.append(doc)\n",
|
||||
" if len(page) >= 10:\n",
|
||||
" # do some paged operation, e.g.\n",
|
||||
" # index.upsert(page)\n",
|
||||
"\n",
|
||||
" page = []"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all PDFPlumberLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PDFPlumberLoader.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -289,7 +289,7 @@
|
||||
"- By page\n",
|
||||
"- As a single text flow\n",
|
||||
"\n",
|
||||
"By default PDFPlumberLoader will split the PDF by page."
|
||||
"By default PyMuPDFLoader will split the PDF by page."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,230 +1,228 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PyPDFDirectoryLoader\n",
|
||||
"\n",
|
||||
"This loader loads all PDF files from a specific directory.\n",
|
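||||
"Depending on your version, the loader may also accept `glob` and `recursive` arguments to control which files are matched; see the API reference at the end of this page.\n",
|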
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | JS support|\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| [PyPDFDirectoryLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ❌ | \n",
|
||||
"### Loader features\n",
|
||||
"| Source | Document Lazy Loading | Native Async Support\n",
|
||||
"| :---: | :---: | :---: | \n",
|
||||
"| PyPDFDirectoryLoader | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"No credentials are needed for this loader."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-01-21T08:00:08.878423Z",
|
||||
"start_time": "2025-01-21T08:00:08.876042Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_community**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-01-21T08:00:12.003718Z",
|
||||
"start_time": "2025-01-21T08:00:10.291617Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": "%pip install -qU langchain_community pypdf pillow"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and load documents:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-01-21T08:00:18.512061Z",
|
||||
"start_time": "2025-01-21T08:00:17.313969Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import PyPDFDirectoryLoader\n",
|
||||
"\n",
|
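||||
"# point the loader at a directory; every PDF inside is loaded\n",
|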
||||
"directory_path = (\n",
|
||||
" \"../../docs/integrations/document_loaders/example_data/layout-parser-paper.pdf\"\n",
|
||||
")\n",
|
||||
"loader = PyPDFDirectoryLoader(\"example_data/\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-01-21T08:00:23.549752Z",
|
||||
"start_time": "2025-01-21T08:00:23.129010Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(metadata={'producer': 'pdfTeX-1.40.21', 'creator': 'LaTeX with hyperref', 'creationdate': '2021-06-22T01:27:10+00:00', 'author': '', 'keywords': '', 'moddate': '2021-06-22T01:27:10+00:00', 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'subject': '', 'title': '', 'trapped': '/False', 'source': 'example_data/layout-parser-paper.pdf', 'total_pages': 16, 'page': 0, 'page_label': '1'}, page_content='LayoutParser: A Unified Toolkit for Deep\\nLearning Based Document Image Analysis\\nZejiang Shen1 (\\x00 ), Ruochen Zhang2, Melissa Dell3, Benjamin Charles Germain\\nLee4, Jacob Carlson3, and Weining Li5\\n1 Allen Institute for AI\\nshannons@allenai.org\\n2 Brown University\\nruochen zhang@brown.edu\\n3 Harvard University\\n{melissadell,jacob carlson}@fas.harvard.edu\\n4 University of Washington\\nbcgl@cs.washington.edu\\n5 University of Waterloo\\nw422li@uwaterloo.ca\\nAbstract. Recent advances in document image analysis (DIA) have been\\nprimarily driven by the application of neural networks. Ideally, research\\noutcomes could be easily deployed in production and extended for further\\ninvestigation. However, various factors like loosely organized codebases\\nand sophisticated model configurations complicate the easy reuse of im-\\nportant innovations by a wide audience. Though there have been on-going\\nefforts to improve reusability and simplify deep learning (DL) model\\ndevelopment in disciplines like natural language processing and computer\\nvision, none of them are optimized for challenges in the domain of DIA.\\nThis represents a major gap in the existing toolkit, as DIA is central to\\nacademic research across a wide range of disciplines in the social sciences\\nand humanities. This paper introduces LayoutParser, an open-source\\nlibrary for streamlining the usage of DL in DIA research and applica-\\ntions. The core LayoutParser library comes with a set of simple and\\nintuitive interfaces for applying and customizing DL models for layout de-\\ntection, character recognition, and many other document processing tasks.\\nTo promote extensibility, LayoutParser also incorporates a community\\nplatform for sharing both pre-trained models and full document digiti-\\nzation pipelines. We demonstrate that LayoutParser is helpful for both\\nlightweight and large-scale digitization pipelines in real-word use cases.\\nThe library is publicly available at https://layout-parser.github.io.\\nKeywords: Document Image Analysis · Deep Learning · Layout Analysis\\n· Character Recognition · Open Source library · Toolkit.\\n1 Introduction\\nDeep Learning(DL)-based approaches are the state-of-the-art for a wide range of\\ndocument image analysis (DIA) tasks including document image classification [11,\\narXiv:2103.15348v2 [cs.CV] 21 Jun 2021')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-01-21T08:00:26.612346Z",
|
||||
"start_time": "2025-01-21T08:00:26.609051Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'producer': 'pdfTeX-1.40.21', 'creator': 'LaTeX with hyperref', 'creationdate': '2021-06-22T01:27:10+00:00', 'author': '', 'keywords': '', 'moddate': '2021-06-22T01:27:10+00:00', 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'subject': '', 'title': '', 'trapped': '/False', 'source': 'example_data/layout-parser-paper.pdf', 'total_pages': 16, 'page': 0, 'page_label': '1'}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-01-21T08:00:30.251598Z",
|
||||
"start_time": "2025-01-21T08:00:29.972141Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"page = []\n",
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" page.append(doc)\n",
|
||||
" if len(page) >= 10:\n",
|
||||
" # do some paged operation, e.g.\n",
|
||||
" # index.upsert(page)\n",
|
||||
"\n",
|
||||
" page = []"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all PyPDFDirectoryLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": ""
|
||||
}
|
||||
],
|
||||
"execution_count": 5
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-01-21T08:00:30.251598Z",
|
||||
"start_time": "2025-01-21T08:00:29.972141Z"
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"page = []\n",
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" page.append(doc)\n",
|
||||
" if len(page) >= 10:\n",
|
||||
" # do some paged operation, e.g.\n",
|
||||
" # index.upsert(page)\n",
|
||||
"\n",
|
||||
" page = []"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": 6
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all PyPDFDirectoryLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.pdf.PyPDFDirectoryLoader.html"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": ""
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
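The cells in this notebook add up to a small ingestion loop. A minimal end-to-end sketch under the same assumptions — a local `example_data/` directory of PDFs; `upsert_batch` is a hypothetical indexing helper, not part of the library:

```python
from langchain_community.document_loaders import PyPDFDirectoryLoader


def upsert_batch(batch):
    # Hypothetical stand-in for a vector-store indexing call.
    print(f"indexed {len(batch)} pages")


loader = PyPDFDirectoryLoader("example_data/")  # loads every *.pdf in the directory

batch = []
for doc in loader.lazy_load():  # yields one Document per PDF page
    batch.append(doc)
    if len(batch) >= 10:
        upsert_batch(batch)
        batch = []
if batch:
    # Flush the final partial batch; the notebook's loop omits this step.
    upsert_batch(batch)
```

The final `if batch:` flush matters whenever the total page count is not a multiple of the batch size.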
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,380 +1,378 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Sitemap\n",
    "\n",
    "An extension of `WebBaseLoader`, `SitemapLoader` loads a sitemap from a given URL, then scrapes and loads all pages in the sitemap, returning each page as a Document.\n",
    "\n",
    "The scraping is done concurrently. There are reasonable limits to concurrent requests, defaulting to 2 per second. If you aren't concerned about being a good citizen, or you control the scraped server, or don't care about load, you can increase this limit. Note, while this will speed up the scraping process, it may cause the server to block you. Be careful!\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/sitemap/)|\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
    "| [SiteMapLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.sitemap.SitemapLoader.html#langchain_community.document_loaders.sitemap.SitemapLoader) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
    "### Loader features\n",
    "| Source | Document Lazy Loading | Native Async Support |\n",
    "| :---: | :---: | :---: | \n",
    "| SiteMapLoader | ✅ | ❌ | \n",
    "\n",
    "## Setup\n",
    "\n",
    "To access the SiteMap document loader you'll need to install the `langchain-community` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "No credentials are needed to run this."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "Install **langchain_community**."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain-community"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Fix notebook asyncio bug"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import nest_asyncio\n",
    "\n",
    "nest_asyncio.apply()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Initialization\n",
    "\n",
    "Now we can instantiate our model object and load documents:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.document_loaders.sitemap import SitemapLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sitemap_loader = SitemapLoader(web_path=\"https://api.python.langchain.com/sitemap.xml\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Fetching pages: 100%|##########| 28/28 [00:04<00:00, 6.83it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Document(metadata={'source': 'https://api.python.langchain.com/en/stable/', 'loc': 'https://api.python.langchain.com/en/stable/', 'lastmod': '2024-05-15T00:29:42.163001+00:00', 'changefreq': 'weekly', 'priority': '1'}, page_content='\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLangChain Python API Reference Documentation.\\n\\n\\nYou will be automatically redirected to the new location of this page.\\n\\n')"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "docs = sitemap_loader.load()\n",
    "docs[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'source': 'https://api.python.langchain.com/en/stable/', 'loc': 'https://api.python.langchain.com/en/stable/', 'lastmod': '2024-05-15T00:29:42.163001+00:00', 'changefreq': 'weekly', 'priority': '1'}\n"
     ]
    }
   ],
   "source": [
    "print(docs[0].metadata)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can change the `requests_per_second` parameter to increase the max concurrent requests, and use `requests_kwargs` to pass kwargs when sending requests."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sitemap_loader.requests_per_second = 2\n",
    "# Optional: avoid `[SSL: CERTIFICATE_VERIFY_FAILED]` issue\n",
    "sitemap_loader.requests_kwargs = {\"verify\": False}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Lazy Load\n",
    "\n",
    "You can also load the pages lazily in order to minimize the memory load."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Fetching pages: 100%|##########| 28/28 [00:01<00:00, 19.06it/s]\n"
     ]
    }
   ],
   "source": [
    "page = []\n",
    "for doc in sitemap_loader.lazy_load():\n",
    "    page.append(doc)\n",
    "    if len(page) >= 10:\n",
    "        # do some paged operation, e.g.\n",
    "        # index.upsert(page)\n",
    "\n",
    "        page = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Filtering sitemap URLs\n",
    "\n",
    "Sitemaps can be massive files, with thousands of URLs. Often you don't need every single one of them. You can filter the URLs by passing a list of strings or regex patterns to the `filter_urls` parameter. Only URLs that match one of the patterns will be loaded."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "loader = SitemapLoader(\n",
    "    web_path=\"https://api.python.langchain.com/sitemap.xml\",\n",
    "    filter_urls=[\"https://api.python.langchain.com/en/latest\"],\n",
    ")\n",
    "documents = loader.load()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Document(page_content='\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nLangChain Python API Reference Documentation.\\n\\n\\nYou will be automatically redirected to the new location of this page.\\n\\n', metadata={'source': 'https://api.python.langchain.com/en/latest/', 'loc': 'https://api.python.langchain.com/en/latest/', 'lastmod': '2024-02-12T05:26:10.971077+00:00', 'changefreq': 'daily', 'priority': '0.9'})"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "documents[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Add custom scraping rules\n",
    "\n",
    "The `SitemapLoader` uses `beautifulsoup4` for the scraping process, and it scrapes every element on the page by default. The `SitemapLoader` constructor accepts a custom scraping function. This feature can be helpful to tailor the scraping process to your specific needs; for example, you might want to avoid scraping headers or navigation elements.\n",
    "\n",
    "The following example shows how to develop and use a custom function to avoid navigation and header elements."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Import the `beautifulsoup4` library and define the custom function."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pip install beautifulsoup4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "from bs4 import BeautifulSoup\n",
    "\n",
    "\n",
    "def remove_nav_and_header_elements(content: BeautifulSoup) -> str:\n",
    "    # Find all 'nav' and 'header' elements in the BeautifulSoup object\n",
    "    nav_elements = content.find_all(\"nav\")\n",
    "    header_elements = content.find_all(\"header\")\n",
    "\n",
    "    # Remove each 'nav' and 'header' element from the BeautifulSoup object\n",
    "    for element in nav_elements + header_elements:\n",
    "        element.decompose()\n",
    "\n",
    "    return str(content.get_text())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Add your custom function to the `SitemapLoader` object."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "loader = SitemapLoader(\n",
    "    \"https://api.python.langchain.com/sitemap.xml\",\n",
    "    filter_urls=[\"https://api.python.langchain.com/en/latest/\"],\n",
    "    parsing_function=remove_nav_and_header_elements,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Local Sitemap\n",
    "\n",
    "The sitemap loader can also be used to load local files."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sitemap_loader = SitemapLoader(web_path=\"example_data/sitemap.xml\", is_local=True)\n",
    "\n",
    "docs = sitemap_loader.load()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all SiteMapLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.sitemap.SitemapLoader.html#langchain_community.document_loaders.sitemap.SitemapLoader"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
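The sitemap notebook above introduces `filter_urls`, a custom `parsing_function`, and `lazy_load` separately. A combined sketch of the whole pipeline — the regex pattern, batch-free streaming loop, and printed fields are illustrative choices, grounded in the outputs shown above (each Document's metadata carries a `loc` key):

```python
from bs4 import BeautifulSoup
from langchain_community.document_loaders.sitemap import SitemapLoader


def strip_chrome(content: BeautifulSoup) -> str:
    # Drop navigation and header chrome before extracting text.
    for tag in content.find_all(["nav", "header"]):
        tag.decompose()
    return str(content.get_text())


loader = SitemapLoader(
    "https://api.python.langchain.com/sitemap.xml",
    filter_urls=[r"https://api\.python\.langchain\.com/en/latest/.*"],  # regex pattern
    parsing_function=strip_chrome,
)
loader.requests_per_second = 2  # stay within the polite default

for doc in loader.lazy_load():  # one Document per matching page
    print(doc.metadata["loc"], len(doc.page_content))
```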
@@ -1,269 +1,267 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# UnstructuredMarkdownLoader\n",
    "\n",
    "This notebook provides a quick overview for getting started with the UnstructuredMarkdown [document loader](https://python.langchain.com/docs/concepts/document_loaders). For detailed documentation of all UnstructuredMarkdownLoader features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/file_loaders/unstructured/)|\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
    "| [UnstructuredMarkdownLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ❌ | ❌ | ✅ | \n",
    "### Loader features\n",
    "| Source | Document Lazy Loading | Native Async Support |\n",
    "| :---: | :---: | :---: | \n",
    "| UnstructuredMarkdownLoader | ✅ | ❌ | \n",
    "\n",
    "## Setup\n",
    "\n",
    "To access the UnstructuredMarkdownLoader document loader you'll need to install the `langchain-community` integration package and the `unstructured` Python package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "No credentials are needed to use this loader."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "Install **langchain_community** and **unstructured**."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain_community unstructured"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Initialization\n",
    "\n",
    "Now we can instantiate our model object and load documents. \n",
    "\n",
    "You can run the loader in one of two modes: \"single\" and \"elements\". If you use \"single\" mode, the document will be returned as a single `Document` object. If you use \"elements\" mode, the unstructured library will split the document into elements such as `Title` and `NarrativeText`. You can pass in additional `unstructured` kwargs after mode to apply different `unstructured` settings."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.document_loaders import UnstructuredMarkdownLoader\n",
    "\n",
    "loader = UnstructuredMarkdownLoader(\n",
    "    \"./example_data/example.md\",\n",
    "    mode=\"single\",\n",
    "    strategy=\"fast\",\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Document(metadata={'source': './example_data/example.md'}, page_content='Sample Markdown Document\\n\\nIntroduction\\n\\nWelcome to this sample Markdown document. Markdown is a lightweight markup language used for formatting text. It\\'s widely used for documentation, readme files, and more.\\n\\nFeatures\\n\\nHeaders\\n\\nMarkdown supports multiple levels of headers:\\n\\nHeader 1: # Header 1\\n\\nHeader 2: ## Header 2\\n\\nHeader 3: ### Header 3\\n\\nLists\\n\\nUnordered List\\n\\nItem 1\\n\\nItem 2\\n\\nSubitem 2.1\\n\\nSubitem 2.2\\n\\nOrdered List\\n\\nFirst item\\n\\nSecond item\\n\\nThird item\\n\\nLinks\\n\\nOpenAI is an AI research organization.\\n\\nImages\\n\\nHere\\'s an example image:\\n\\nCode\\n\\nInline Code\\n\\nUse code for inline code snippets.\\n\\nCode Block\\n\\n```python def greet(name): return f\"Hello, {name}!\"\\n\\nprint(greet(\"World\")) ```')"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "docs = loader.load()\n",
    "docs[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'source': './example_data/example.md'}\n"
     ]
    }
   ],
   "source": [
    "print(docs[0].metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Lazy Load"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Document(metadata={'source': './example_data/example.md', 'link_texts': ['OpenAI'], 'link_urls': ['https://www.openai.com'], 'last_modified': '2024-08-14T15:04:18', 'languages': ['eng'], 'parent_id': 'de1f74bf226224377ab4d8b54f215bb9', 'filetype': 'text/markdown', 'file_directory': './example_data', 'filename': 'example.md', 'category': 'NarrativeText', 'element_id': '898a542a261f7dc65e0072d1e847d535'}, page_content='OpenAI is an AI research organization.')"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "page = []\n",
    "for doc in loader.lazy_load():\n",
    "    page.append(doc)\n",
    "    if len(page) >= 10:\n",
    "        # do some paged operation, e.g.\n",
    "        # index.upsert(page)\n",
    "\n",
    "        page = []\n",
    "page[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Elements\n",
    "\n",
    "In this example we will load in the `elements` mode, which will return a list of the different elements in the markdown document:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "29"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_community.document_loaders import UnstructuredMarkdownLoader\n",
    "\n",
    "loader = UnstructuredMarkdownLoader(\n",
    "    \"./example_data/example.md\",\n",
    "    mode=\"elements\",\n",
    "    strategy=\"fast\",\n",
    ")\n",
    "\n",
    "docs = loader.load()\n",
    "len(docs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As you can see, 29 elements were pulled from the `example.md` file. The first element is the title of the document, as expected:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Sample Markdown Document'"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "docs[0].page_content"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all UnstructuredMarkdownLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
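Since elements mode tags every element with a `category` in its metadata (visible in the outputs above, e.g. `'category': 'NarrativeText'`), one natural follow-up is to filter or tally elements by type. A small sketch, assuming the same `example.md` file:

```python
from collections import Counter

from langchain_community.document_loaders import UnstructuredMarkdownLoader

loader = UnstructuredMarkdownLoader(
    "./example_data/example.md",
    mode="elements",
    strategy="fast",
)
docs = loader.load()

# Each element Document carries its element type in metadata["category"].
print(Counter(doc.metadata["category"] for doc in docs))

# Keep only body prose, dropping titles, list items, etc.
narrative = [doc for doc in docs if doc.metadata["category"] == "NarrativeText"]
print(len(narrative), "narrative elements")
```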
File diff suppressed because one or more lines are too long
@@ -1,202 +1,200 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "72ccbe2b",
   "metadata": {},
   "source": [
    "# UnstructuredXMLLoader\n",
    "\n",
    "This notebook provides a quick overview for getting started with the UnstructuredXMLLoader [document loader](https://python.langchain.com/docs/concepts/document_loaders). The `UnstructuredXMLLoader` is used to load `.xml` files; the page content will be the text extracted from the XML tags.\n",
    "\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/file_loaders/unstructured/)|\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
    "| [UnstructuredXMLLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.xml.UnstructuredXMLLoader.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ✅ | ❌ | ✅ | \n",
    "### Loader features\n",
    "| Source | Document Lazy Loading | Native Async Support |\n",
    "| :---: | :---: | :---: | \n",
    "| UnstructuredXMLLoader | ✅ | ❌ | \n",
    "\n",
    "## Setup\n",
    "\n",
    "To access the UnstructuredXMLLoader document loader you'll need to install the `langchain-community` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "No credentials are needed to use the UnstructuredXMLLoader."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fc4ba987",
   "metadata": {},
   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9fa4d5e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "38e53f22",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "Install **langchain_community**."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fcd320ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain_community"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a102f199",
   "metadata": {},
   "source": [
    "## Initialization\n",
    "\n",
    "Now we can instantiate our model object and load documents:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2d198582",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.document_loaders import UnstructuredXMLLoader\n",
    "\n",
    "loader = UnstructuredXMLLoader(\n",
    "    \"./example_data/factbook.xml\",\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9bbb463c",
   "metadata": {},
   "source": [
    "## Load"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "cd875e75",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Document(metadata={'source': './example_data/factbook.xml'}, page_content='United States\\n\\nWashington, DC\\n\\nJoe Biden\\n\\nBaseball\\n\\nCanada\\n\\nOttawa\\n\\nJustin Trudeau\\n\\nHockey\\n\\nFrance\\n\\nParis\\n\\nEmmanuel Macron\\n\\nSoccer\\n\\nTrinidad & Tobado\\n\\nPort of Spain\\n\\nKeith Rowley\\n\\nTrack & Field')"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "docs = loader.load()\n",
    "docs[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "79b52cc0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'source': './example_data/factbook.xml'}\n"
     ]
    }
   ],
   "source": [
    "print(docs[0].metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "557608e5",
   "metadata": {},
   "source": [
    "## Lazy Load"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e3b9e75c",
   "metadata": {},
   "outputs": [],
   "source": [
    "page = []\n",
    "for doc in loader.lazy_load():\n",
    "    page.append(doc)\n",
    "    if len(page) >= 10:\n",
    "        # do some paged operation, e.g.\n",
    "        # index.upsert(page)\n",
    "\n",
    "        page = []"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "712aa98f",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all UnstructuredXMLLoader features and configurations head to the API reference: https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.xml.UnstructuredXMLLoader.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -48,7 +48,7 @@
"print(f\"Response provided by LLM with system prompt set is : {sys_resp}\")\n",
"\n",
"# The top_responses parameter can give multiple responses based on its parameter value\n",
"# This below code retrive top 10 miner's response all the response are in format of json\n",
"# The code below retrieves the top 10 miners' responses; all responses are returned as JSON\n",
"\n",
"# Json response structure is\n",
"\"\"\" {\n",
@@ -1,263 +1,261 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9597802c",
"metadata": {},
"source": [
"# OpenAI\n",
"\n",
":::caution\n",
"You are currently on a page documenting the use of OpenAI [text completion models](/docs/concepts/text_llms). The latest and most popular OpenAI models are [chat completion models](/docs/concepts/chat_models).\n",
"\n",
"Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/openai/).\n",
":::\n",
"\n",
"[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of models with different levels of power suitable for different tasks.\n",
"\n",
"This example goes over how to use LangChain to interact with `OpenAI` [models](https://platform.openai.com/docs/models)"
]
},
{
"cell_type": "markdown",
"id": "74312161",
"metadata": {},
"source": [
"## Overview\n",
"\n",
"### Integration details\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/openai) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatOpenAI](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html) | [langchain-openai](https://python.langchain.com/api_reference/openai/index.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"\n",
"## Setup\n",
"\n",
"To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the `langchain-openai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to https://platform.openai.com to sign up to OpenAI and generate an API key. Once you've done this set the OPENAI_API_KEY environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "efcdb2b6",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
"    os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API key: \")"
]
},
{
"cell_type": "markdown",
"id": "f5d528fa",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": 2,
"id": "52fa46e8",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0fad78d8",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain OpenAI integration lives in the `langchain-openai` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2e300149",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "129a3275",
"metadata": {},
"source": [
"Should you need to specify your organization ID, you can use the following cell. However, it is not required if you are only part of a single organization or intend to use your default organization. You can check your default organization [here](https://platform.openai.com/account/api-keys).\n",
"\n",
"To specify your organization, you can use this:\n",
"```python\n",
"OPENAI_ORGANIZATION = getpass()\n",
"\n",
"os.environ[\"OPENAI_ORGANIZATION\"] = OPENAI_ORGANIZATION\n",
"```\n",
"\n",
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "6fb585dd",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_openai import OpenAI\n",
"\n",
"llm = OpenAI()"
]
},
{
"cell_type": "markdown",
"id": "464003c1",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "85b49da0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nI am an AI and do not have emotions like humans do, so I am always functioning at my optimal level. Thank you for asking! How can I assist you today?'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.invoke(\"Hello how are you?\")"
]
},
{
"cell_type": "markdown",
"id": "2b7e0dfc",
"metadata": {},
"source": [
"## Chaining"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a641dbd9",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"'\\nIch liebe Programmieren.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate.from_template(\"How to say {input} in {output_language}:\\n\")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "58a9ddb1",
"metadata": {},
"source": [
"## Using a proxy\n",
"\n",
"If you are behind an explicit proxy, you can pass an `http_client` that routes requests through it:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "55142cec",
"metadata": {},
"outputs": [],
"source": [
"%pip install httpx\n",
"\n",
"import httpx\n",
"\n",
"openai = OpenAI(\n",
"    model_name=\"gpt-3.5-turbo-instruct\",\n",
"    http_client=httpx.Client(proxies=\"http://proxy.yourcompany.com:8080\"),\n",
")"
]
},
{
"cell_type": "raw",
"id": "2fd99e97-013f-4c28-bb47-426faa42a2cf",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `OpenAI` llm features and configurations head to the API reference: https://python.langchain.com/api_reference/openai/llms/langchain_openai.llms.base.OpenAI.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
},
"vscode": {
"interpreter": {
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}
docs/docs/integrations/providers/goat.mdx (new file)
@@ -0,0 +1,39 @@
# GOAT

[GOAT](https://github.com/goat-sdk/goat) is the finance toolkit for AI agents.

Create agents that can:

- Send and receive payments
- Purchase physical and digital goods and services
- Engage in various investment strategies:
  - Earn yield
  - Bet on prediction markets
- Purchase crypto assets
- Tokenize any asset
- Get financial insights

### How it works
GOAT leverages blockchains, cryptocurrencies (such as stablecoins), and wallets as the infrastructure to enable agents to become economic actors:

1. Give your agent a [wallet](https://github.com/goat-sdk/goat/tree/main#chains-and-wallets)
2. Allow it to transact [anywhere](https://github.com/goat-sdk/goat/tree/main#chains-and-wallets)
3. Use more than [200 tools](https://github.com/goat-sdk/goat/tree/main#tools)

See everything GOAT supports [here](https://github.com/goat-sdk/goat/tree/main#chains-and-wallets).

**Lightweight and extendable**
Different from other toolkits, GOAT is designed to be lightweight and extendable: its core stays minimal, and you install only the tools you need.

If you don't find what you need among our more than 200 integrations, you can easily:

- Create your own plugin
- Integrate a new chain
- Integrate a new wallet
- Integrate a new agent framework

See how to do it [here](https://github.com/goat-sdk/goat/tree/main#-contributing).

## Installation and Setup

Check out our [quickstart](https://github.com/goat-sdk/goat/tree/main/python/examples/by-framework/langchain) to see how to set up and install GOAT.
docs/docs/integrations/providers/netmind.ipynb (new file)
@@ -0,0 +1,73 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Netmind\n",
"\n",
"[Netmind AI](https://www.netmind.ai/) helps you build AI faster, smarter, and more affordably:\n",
"train, fine-tune, run inference, and scale with their global GPU network, an all-in-one AI engine.\n",
"\n",
"This example goes over how to use LangChain to interact with Netmind AI models.\n"
]
},
{
"metadata": {},
"cell_type": "markdown",
"source": [
"## Installation and Setup\n",
"\n",
"```bash\n",
"pip install langchain-netmind\n",
"```\n",
"\n",
"Get a Netmind API key and set it as an environment variable (`NETMIND_API_KEY`).\n",
"Head to https://www.netmind.ai/ to sign up to Netmind and generate an API key.\n",
"\n",
]
},
{
"metadata": {},
"cell_type": "markdown",
"source": [
"## Chat Models\n",
"\n",
"For more on Netmind chat models, visit the guide [here](/docs/integrations/chat/netmind)\n",
]
},
{
"metadata": {},
"cell_type": "markdown",
"source": [
"## Embedding Model\n",
"\n",
"For more on Netmind embedding models, visit the [guide](/docs/integrations/text_embedding/netmind)\n"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
docs/docs/integrations/providers/vectara.ipynb (new file)
@@ -0,0 +1,348 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "559f8e0e",
"metadata": {},
"source": [
"# Vectara\n",
"\n",
"[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform which focuses on enterprise readiness for mission-critical applications.\n",
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
"1. A way to extract text from files (PDF, PPT, DOCX, etc)\n",
"2. ML-based chunking that provides state of the art performance.\n",
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
"5. A query service that automatically encodes the query into an embedding and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), and the [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking).\n",
"6. An LLM for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview) based on the retrieved documents (context), including citations.\n",
"\n",
"For more information:\n",
"- [Documentation](https://docs.vectara.com/docs/)\n",
"- [API Playground](https://docs.vectara.com/docs/rest-api/)\n",
"- [Quickstart](https://docs.vectara.com/docs/quickstart)\n",
"\n",
"This notebook shows how to use the basic retrieval functionality when utilizing Vectara just as a Vector Store (without summarization), including `similarity_search` and `similarity_search_with_score` as well as the LangChain `as_retriever` functionality.\n",
"\n",
"\n",
"## Setup\n",
"\n",
"To use the `VectaraVectorStore` you first need to install the partner package.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dfdf03ba-d6f5-4b1e-86d3-a65c4bc99aa1",
"metadata": {},
"outputs": [],
"source": [
"!uv pip install -U pip && uv pip install -qU langchain-vectara"
]
},
{
"cell_type": "markdown",
"id": "e97dcf11",
"metadata": {},
"source": [
"# Getting Started\n",
"\n",
"To get started, use the following steps:\n",
"1. If you don't already have one, [sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial.\n",
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name for your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right at the top.\n",
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential.\n",
"\n",
"To use LangChain with Vectara, you'll need to have these two values: `corpus_key` and `api_key`.\n",
"You can provide them to LangChain in two ways:\n",
"\n",
"1. Include in your environment these two variables: `VECTARA_API_KEY` and `VECTARA_CORPUS_KEY`.\n",
"\n",
"   For example, you can set these variables using os.environ and getpass as follows:\n",
"\n",
"```python\n",
"import os\n",
"import getpass\n",
"\n",
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
"```\n",
"\n",
"2. Add them to the `Vectara` vectorstore constructor:\n",
"\n",
"```python\n",
"vectara = Vectara(\n",
"    vectara_api_key=vectara_api_key\n",
")\n",
"```\n",
"\n",
"In this notebook we assume they are provided in the environment."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "aac7a9a6",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"VECTARA_API_KEY\"] = \"<VECTARA_API_KEY>\"\n",
"os.environ[\"VECTARA_CORPUS_KEY\"] = \"<VECTARA_CORPUS_KEY>\"\n",
"\n",
"from langchain_vectara import Vectara\n",
"from langchain_vectara.vectorstores import (\n",
"    ChainReranker,\n",
"    CorpusConfig,\n",
"    CustomerSpecificReranker,\n",
"    File,\n",
"    GenerationConfig,\n",
"    MmrReranker,\n",
"    SearchConfig,\n",
"    VectaraQueryConfig,\n",
")\n",
"\n",
"vectara = Vectara(vectara_api_key=os.getenv(\"VECTARA_API_KEY\"))"
]
},
{
"cell_type": "markdown",
"id": "875ffb7e",
"metadata": {},
"source": [
"First we load the state-of-the-union text into Vectara.\n",
"\n",
"Note that we use the add_files interface, which does not require any local processing or chunking - Vectara receives the file content and performs all the necessary pre-processing, chunking and embedding of the file into its knowledge store.\n",
"\n",
"In this case it uses a .txt file but the same works for many other [file types](https://docs.vectara.com/docs/api-reference/indexing-apis/file-upload/file-upload-filetypes)."
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "be0a4973",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['state_of_the_union.txt']"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"corpus_key = os.getenv(\"VECTARA_CORPUS_KEY\")\n",
"file_obj = File(\n",
"    file_path=\"../document_loaders/example_data/state_of_the_union.txt\",\n",
"    metadata={\"source\": \"text_file\"},\n",
")\n",
"vectara.add_files([file_obj], corpus_key)"
]
},
{
"cell_type": "markdown",
"id": "22a6b953",
"metadata": {},
"source": [
"## Vectara RAG (retrieval augmented generation)\n",
"\n",
"We now create a `VectaraQueryConfig` object to control the retrieval and summarization options:\n",
"* We enable summarization, specifying we would like the LLM to pick the top 7 matching chunks and respond in English\n",
"\n",
"Using this configuration, let's create a LangChain `Runnable` object that encapsulates the full Vectara RAG pipeline, using the `as_rag` method:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "9ecda054-96a8-4a91-aeae-32006efb1ac8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"President Biden discussed several key issues in his recent statements. He emphasized the importance of keeping schools open and noted that with a high vaccination rate and reduced hospitalizations, most Americans can safely return to normal activities without masks [1]. He addressed the need to hold social media platforms accountable for their impact on children and called for stronger privacy protections and mental health services [2]. Biden also announced measures against Russia, including preventing its central bank from defending the Ruble and targeting Russian oligarchs' assets, as part of efforts to weaken Russia's economy and military [3]. Additionally, he highlighted the importance of protecting women's rights, specifically the right to choose as affirmed in Roe v. Wade [5]. Lastly, he advocated for funding the police with necessary resources and training to ensure community safety [6].\""
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generation_config = GenerationConfig(\n",
"    max_used_search_results=7,\n",
"    response_language=\"eng\",\n",
"    generation_preset_name=\"vectara-summary-ext-24-05-med-omni\",\n",
"    enable_factual_consistency_score=True,\n",
")\n",
"search_config = SearchConfig(\n",
"    corpora=[CorpusConfig(corpus_key=corpus_key)],\n",
"    limit=25,\n",
"    reranker=ChainReranker(\n",
"        rerankers=[\n",
"            CustomerSpecificReranker(reranker_id=\"rnk_272725719\", limit=100),\n",
"            MmrReranker(diversity_bias=0.2, limit=100),\n",
"        ]\n",
"    ),\n",
")\n",
"\n",
"config = VectaraQueryConfig(\n",
"    search=search_config,\n",
"    generation=generation_config,\n",
")\n",
"\n",
"query_str = \"what did Biden say?\"\n",
"\n",
"rag = vectara.as_rag(config)\n",
"rag.invoke(query_str)[\"answer\"]"
]
},
{
"cell_type": "markdown",
"id": "cd825d63-93a0-4e45-a455-bfabb01ee1a1",
"metadata": {},
"source": [
"We can also use the streaming interface like this:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "27f01330-8917-4eff-b603-59ab2571a4d2",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"President Biden emphasized several key points in his statements. He highlighted the importance of keeping schools open and noted that with a high vaccination rate and reduced hospitalizations, most Americans can safely return to normal activities without masks [1]. He addressed the need to hold social media platforms accountable for their impact on children and called for stronger privacy protections and mental health services [2]. Biden also discussed measures against Russia, including preventing their central bank from defending the Ruble and targeting Russian oligarchs' assets [3]. Additionally, he reaffirmed the commitment to protect women's rights, particularly the right to choose as affirmed in Roe v. Wade [5]. Lastly, he advocated for funding the police to ensure community safety [6]."
]
}
],
"source": [
"output = {}\n",
"curr_key = None\n",
"for chunk in rag.stream(query_str):\n",
"    for key in chunk:\n",
"        if key not in output:\n",
"            output[key] = chunk[key]\n",
"        else:\n",
"            output[key] += chunk[key]\n",
"        if key == \"answer\":\n",
"            print(chunk[key], end=\"\", flush=True)\n",
"        curr_key = key"
]
},
{
"cell_type": "markdown",
"id": "8f16bf8d",
"metadata": {},
"source": [
"For more details about Vectara as a VectorStore [go to this notebook](../vectorstores/vectara.ipynb)."
]
},
{
"cell_type": "markdown",
"id": "d49a91d2-9c53-48cb-8065-a3ba1292e8d0",
"metadata": {},
"source": [
"## Vectara Chat\n",
"\n",
"In most uses of LangChain to create chatbots, one must integrate a special `memory` component that maintains the history of chat sessions and then uses that history to ensure the chatbot is aware of conversation history.\n",
"\n",
"With Vectara Chat - all of that is performed in the backend by Vectara automatically."
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "f57264ec-e8b5-4d55-9c16-54898d506f73",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'The president stated that nominating someone to serve on the United States Supreme Court is one of the most serious constitutional responsibilities he has. He nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, describing her as one of the nation’s top legal minds who will continue Justice Breyer’s legacy of excellence [1].'"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generation_config = GenerationConfig(\n",
"    max_used_search_results=7,\n",
"    response_language=\"eng\",\n",
"    generation_preset_name=\"vectara-summary-ext-24-05-med-omni\",\n",
"    enable_factual_consistency_score=True,\n",
")\n",
"search_config = SearchConfig(\n",
"    corpora=[CorpusConfig(corpus_key=corpus_key, limit=25)],\n",
"    reranker=MmrReranker(diversity_bias=0.2),\n",
")\n",
"\n",
"config = VectaraQueryConfig(\n",
"    search=search_config,\n",
"    generation=generation_config,\n",
")\n",
"\n",
"\n",
"bot = vectara.as_chat(config)\n",
"\n",
"bot.invoke(\"What did the president say about Ketanji Brown Jackson?\")[\"answer\"]\n",
]
},
{
"cell_type": "markdown",
"id": "13714687-672d-47af-997a-61bb9dd66923",
"metadata": {},
"source": [
"For more details about Vectara chat [go to this notebook](../chat/vectara.ipynb)."
]
},
{
"cell_type": "markdown",
"id": "baf687dc-08c4-49af-98aa-0359e2591f2e",
"metadata": {},
"source": [
"## Vectara as self-querying retriever\n",
"Vectara offers an Intelligent Query Rewriting option, which enhances search precision by automatically generating metadata filter expressions from natural language queries. This capability analyzes user queries, extracts relevant metadata filters, and rephrases the query to focus on the core information need. For more details [go to this notebook](../retrievers/self_query/vectara_self_query.ipynb)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8060a423-b291-4166-8fd7-ba0e01692b51",
"metadata": {},
"outputs": [],
"source": [
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,181 +0,0 @@
# Vectara

>[Vectara](https://vectara.com/) provides a Trusted Generative AI platform, allowing organizations to rapidly create a ChatGPT-like experience (an AI assistant)
> which is grounded in the data, documents, and knowledge that they have (technically, it is Retrieval-Augmented-Generation-as-a-service).

**Vectara Overview:**
[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform which focuses on enterprise readiness for mission-critical applications.
Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:
1. A way to extract text from files (PDF, PPT, DOCX, etc)
2. ML-based chunking that provides state of the art performance.
3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.
4. Its own internal vector database where text chunks and embedding vectors are stored.
5. A query service that automatically encodes the query into an embedding and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), and the [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking).
6. An LLM for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview) based on the retrieved documents (context), including citations.

For more information:
- [Documentation](https://docs.vectara.com/docs/)
- [API Playground](https://docs.vectara.com/docs/rest-api/)
- [Quickstart](https://docs.vectara.com/docs/quickstart)

## Installation and Setup

To use `Vectara` with LangChain no special installation steps are required.
To get started, [sign up](https://vectara.com/integrations/langchain) for a free Vectara trial,
and follow the [quickstart](https://docs.vectara.com/docs/quickstart) guide to create a corpus and an API key.
Once you have these, you can provide them as arguments to the Vectara `vectorstore`, or you can set them as environment variables.

- export `VECTARA_CUSTOMER_ID`="your_customer_id"
- export `VECTARA_CORPUS_ID`="your_corpus_id"
- export `VECTARA_API_KEY`="your-vectara-api-key"

## Vectara as a Vector Store

There exists a wrapper around the Vectara platform, allowing you to use it as a `vectorstore` in LangChain.

To import this vectorstore:
```python
from langchain_community.vectorstores import Vectara
```

To create an instance of the Vectara vectorstore:
```python
vectara = Vectara(
    vectara_customer_id=customer_id,
    vectara_corpus_id=corpus_id,
    vectara_api_key=api_key
)
```
The `customer_id`, `corpus_id` and `api_key` are optional; if they are not supplied, they will be read from
the environment variables `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`, respectively.

### Adding Texts or Files

After you have the vectorstore, you can `add_texts` or `add_documents` as per the standard `VectorStore` interface, for example:

```python
vectara.add_texts(["to be or not to be", "that is the question"])
```

Since Vectara supports file-upload in the platform, we also added the ability to upload files (PDF, TXT, HTML, PPT, DOC, etc) directly.
When using this method, each file is uploaded directly to the Vectara backend, processed and chunked optimally there, so you don't have to use the LangChain document loader or chunking mechanism.

As an example:

```python
vectara.add_files(["path/to/file1.pdf", "path/to/file2.pdf",...])
```

Of course you do not have to add any data; you can instead just connect to an existing Vectara corpus where data may already be indexed.

### Querying the VectorStore

To query the Vectara vectorstore, you can use the `similarity_search` method (or `similarity_search_with_score`), which takes a query string and returns a list of results:
```python
results = vectara.similarity_search_with_score("what is LangChain?")
```
The results are returned as a list of relevant documents along with a relevance score for each document.

In this case, we used the default retrieval parameters, but you can also specify the following additional arguments in `similarity_search` or `similarity_search_with_score`:
- `k`: number of results to return (defaults to 5)
- `lambda_val`: the [lexical matching](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) factor for hybrid search (defaults to 0.025)
- `filter`: a [filter](https://docs.vectara.com/docs/common-use-cases/filtering-by-metadata/filter-overview) to apply to the results (default None)
- `n_sentence_context`: number of sentences to include before/after the actual matching segment when returning results. This defaults to 2.
- `rerank_config`: can be used to specify a reranker for the results
  - `reranker`: mmr, rerank_multilingual_v1 or none. Note that "rerank_multilingual_v1" is a Scale-only feature
  - `rerank_k`: number of results to use for reranking
  - `mmr_diversity_bias`: 0 = no diversity, 1 = full diversity. This is the lambda parameter in the MMR formula and is in the range 0...1

To get results without the relevance score, you can simply use the `similarity_search` method:
```python
results = vectara.similarity_search("what is LangChain?")
```
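
For example, here is a sketch that combines several of these arguments (the filter expression and reranker settings are illustrative, and `RerankConfig` is assumed importable from the same module as the other config classes used further down this page):

```python
from langchain_community.vectorstores.vectara import RerankConfig

results = vectara.similarity_search_with_score(
    "what is LangChain?",
    k=10,                      # return up to 10 results
    lambda_val=0.025,          # lexical-matching weight for hybrid search
    filter="doc.rating > 3",   # illustrative metadata filter expression
    n_sentence_context=2,      # sentences of context around each match
    rerank_config=RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2),
)
```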

## Vectara for Retrieval Augmented Generation (RAG)

Vectara provides a full RAG pipeline, including generative summarization. To use it as a complete RAG solution, you can use the `as_rag` method.
There are a few additional parameters that can be specified in the `VectaraQueryConfig` object to control retrieval and summarization:
* k: number of results to return
* lambda_val: the lexical matching factor for hybrid search
* summary_config (optional): can be used to request an LLM summary in RAG
  - is_enabled: True or False
  - max_results: number of results to use for summary generation
  - response_lang: language of the response summary, in ISO 639-2 format (e.g. 'en', 'fr', 'de', etc)
* rerank_config (optional): can be used to specify a Vectara reranker for the results
  - reranker: mmr, rerank_multilingual_v1 or none
  - rerank_k: number of results to use for reranking
  - mmr_diversity_bias: 0 = no diversity, 1 = full diversity.
    This is the lambda parameter in the MMR formula and is in the range 0...1

For example:

```python
summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)
```
Then you can use the `as_rag` method to create a RAG pipeline:

```python
query_str = "what did Biden say?"

rag = vectara.as_rag(config)
rag.invoke(query_str)['answer']
```

The `as_rag` method returns a `VectaraRAG` object, which behaves just like any LangChain Runnable, including the `invoke` or `stream` methods.

## Vectara Chat

The RAG functionality can be used to create a chatbot. For example, you can create a simple chatbot that responds to user input:

```python
summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)

query_str = "what did Biden say?"
bot = vectara.as_chat(config)
bot.invoke(query_str)['answer']
```

The main difference is the following: with `as_chat` Vectara internally tracks the chat history and conditions each response on the full chat history.
There is no need to keep that history locally in LangChain, as Vectara will manage it internally.

## Vectara as a LangChain retriever only

If you want to use Vectara as a retriever only, you can use the `as_retriever` method, which returns a `VectaraRetriever` object.
```python
retriever = vectara.as_retriever(config=config)
retriever.invoke(query_str)
```

As with `as_rag`, you provide a `VectaraQueryConfig` object to control the retrieval parameters.
In most cases you would not enable the summary_config, but it is left as an option for backwards compatibility.
If no summary is requested, the response will be a list of relevant documents, each with a relevance score.
If a summary is requested, the response will be a list of relevant documents as before, plus an additional document that includes the generative summary.

## Hallucination Detection score

Vectara created [HHEM](https://huggingface.co/vectara/hallucination_evaluation_model) - an open source model that can be used to evaluate RAG responses for factual consistency.
As part of the Vectara RAG, the "Factual Consistency Score" (or FCS), an improved version of the open source HHEM, is made available via the API.
This is automatically included in the output of the RAG pipeline:

```python
summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang='eng')
rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
config = VectaraQueryConfig(k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config)

rag = vectara.as_rag(config)
resp = rag.invoke(query_str)
print(resp['answer'])
print(f"Vectara FCS = {resp['fcs']}")
```

## Example Notebooks

For more detailed examples of using Vectara with LangChain, see the following example notebooks:
* [this notebook](/docs/integrations/vectorstores/vectara) shows how to use Vectara with full RAG or just as a retriever.
* [this notebook](/docs/integrations/retrievers/self_query/vectara_self_query) shows the self-query capability with Vectara.
* [this notebook](/docs/integrations/providers/vectara/vectara_chat) shows how to build a chatbot with LangChain and Vectara
docs/docs/integrations/providers/vectorize.mdx (new file)
@@ -0,0 +1,40 @@
# Vectorize

> [Vectorize](https://vectorize.io/) helps you build AI apps faster and with less hassle.
> It automates data extraction, finds the best vectorization strategy using RAG evaluation,
> and lets you quickly deploy real-time RAG pipelines for your unstructured data.
> Your vector search indexes stay up-to-date, and it integrates with your existing vector database,
> so you maintain full control of your data.
> Vectorize handles the heavy lifting, freeing you to focus on building robust AI solutions without getting bogged down by data management.

# Installation and Setup

Install the following Python package:
```bash
pip install langchain-vectorize
```

Sign up for a free Vectorize account [here](https://platform.vectorize.io/).
Generate an access token in the [Access Token](https://docs.vectorize.io/rag-pipelines/retrieval-endpoint#access-tokens) section.
Gather your organization ID: in the browser URL, it is the UUID that follows `/organization/`.

Set up the following variables:
```python
VECTORIZE_ORG_ID = "your-organization-id"
VECTORIZE_API_TOKEN = "your-api-token"
```

## Retriever

```python
from langchain_vectorize import VectorizeRetriever

retriever = VectorizeRetriever(
    api_token=VECTORIZE_API_TOKEN,
    organization=VECTORIZE_ORG_ID,
    pipeline_id="...",
)
retriever.invoke("query")
```

Learn more in the [example notebook](/docs/integrations/retrievers/vectorize).
@@ -8,7 +8,6 @@
|
||||
"# Vectara self-querying \n",
|
||||
"\n",
|
||||
"[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform which focuses on enterprise readiness for mission-critical applications.\n",
|
||||
"\n",
|
||||
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
|
||||
"1. A way to extract text from files (PDF, PPT, DOCX, etc)\n",
|
||||
"2. ML-based chunking that provides state of the art performance.\n",
|
||||
@@ -17,9 +16,27 @@
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking). \n",
|
||||
"6. An LLM to for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"\n",
|
||||
"See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
|
||||
"For more information:\n",
|
||||
"- [Documentation](https://docs.vectara.com/docs/)\n",
|
||||
"- [API Playground](https://docs.vectara.com/docs/rest-api/)\n",
|
||||
"- [Quickstart](https://docs.vectara.com/docs/quickstart)\n",
|
||||
"\n",
|
||||
"This notebook shows how to use `SelfQueryRetriever` with Vectara."
|
||||
"\n",
|
||||
"This notebook shows how to use `Vectara` as `SelfQueryRetriever`.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To use the `VectaraVectorStore` you first need to install the partner package.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "07f3f1a4-f552-4d07-ba48-18fb5d8641c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!uv pip install -U pip && uv pip install -qU langchain-vectara"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -30,14 +47,14 @@
|
||||
"# Getting Started\n",
|
||||
"\n",
|
||||
"To get started, use the following steps:\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: `customer ID`, `corpus ID` and `api_key`.\n",
|
||||
"You can provide those to LangChain in two ways:\n",
|
||||
"To use LangChain with Vectara, you'll need to have these two values: `corpus_key` and `api_key`.\n",
|
||||
"You can provide `VECTARA_API_KEY` to LangChain in two ways:\n",
|
||||
"\n",
|
||||
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||
"1. Include in your environment these two variables: `VECTARA_API_KEY`.\n",
|
||||
"\n",
|
||||
" For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
"\n",
|
||||
@@ -45,8 +62,6 @@
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = getpass.getpass(\"Vectara Customer ID:\")\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = getpass.getpass(\"Vectara Corpus ID:\")\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
@@ -54,14 +69,11 @@
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectara = Vectara(\n",
|
||||
" vectara_customer_id=vectara_customer_id,\n",
|
||||
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
" )\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
")\n",
|
||||
"```\n",
|
||||
"In this notebook we assume they are provided in the environment.\n",
|
||||
"\n",
|
||||
"**Notes:** The self-query retriever requires you to have `lark` installed (`pip install lark`). "
|
||||
"In this notebook we assume they are provided in the environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -71,14 +83,14 @@
|
||||
"source": [
|
||||
"## Connecting to Vectara from LangChain\n",
|
||||
"\n",
|
||||
"In this example, we assume that you've created an account and a corpus, and added your `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY` (created with permissions for both indexing and query) as environment variables.\n",
|
||||
"In this example, we assume that you've created an account and a corpus, and added your `VECTARA_CORPUS_KEY` and `VECTARA_API_KEY` (created with permissions for both indexing and query) as environment variables.\n",
|
||||
"\n",
|
||||
"We further assume the corpus has 4 fields defined as filterable metadata attributes: `year`, `director`, `rating`, and `genre`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"id": "9d3aa44f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -87,14 +99,10 @@
|
||||
"\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = \"<YOUR_VECTARA_API_KEY>\"\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = \"<YOUR_VECTARA_CORPUS_ID>\"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = \"<YOUR_VECTARA_CUSTOMER_ID>\"\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = \"VECTARA_API_KEY\"\n",
|
||||
"os.environ[\"VECTARA_CORPUS_KEY\"] = \"VECTARA_CORPUS_KEY\"\n",
|
||||
"\n",
|
||||
"from langchain.chains.query_constructor.schema import AttributeInfo\n",
|
||||
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||
"from langchain_community.vectorstores import Vectara\n",
|
||||
"from langchain_openai.chat_models import ChatOpenAI"
|
||||
"from langchain_vectara import Vectara"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -109,7 +117,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 6,
|
||||
"id": "bcbe04d9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -148,9 +156,12 @@
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"corpus_key = os.getenv(\"VECTARA_CORPUS_KEY\")\n",
|
||||
"vectara = Vectara()\n",
|
||||
"for doc in docs:\n",
|
||||
" vectara.add_texts([doc.page_content], doc_metadata=doc.metadata)"
|
||||
" vectara.add_texts(\n",
|
||||
" [doc.page_content], corpus_key=corpus_key, doc_metadata=doc.metadata\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -158,45 +169,32 @@
|
||||
"id": "5ecaab6d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Creating the self-querying retriever\n",
|
||||
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents.\n",
|
||||
"## Self-query with Vectara\n",
|
||||
" You don't need self-query via the LangChain mechanism—enabling `intelligent_query_rewriting` on the Vectara platform achieves the same result.\n",
|
||||
"Vectara offers Intelligent Query Rewriting option which enhances search precision by automatically generating metadata filter expressions from natural language queries. This capability analyzes user queries, extracts relevant metadata filters, and rephrases the query to focus on the core information need. For more [details](https://docs.vectara.com/docs/search-and-retrieval/intelligent-query-rewriting).\n",
|
||||
"\n",
|
||||
"We then provide an llm (in this case OpenAI) and the `vectara` vectorstore as arguments:"
|
||||
"Enable intelligent query rewriting on a per-query basis by setting the `intelligent_query_rewriting` parameter to `true` in `VectaraQueryConfig`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 7,
|
||||
"id": "86e34dbf",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"metadata_field_info = [\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"genre\",\n",
|
||||
" description=\"The genre of the movie\",\n",
|
||||
" type=\"string or list[string]\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"year\",\n",
|
||||
" description=\"The year the movie was released\",\n",
|
||||
" type=\"integer\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"director\",\n",
|
||||
" description=\"The name of the movie director\",\n",
|
||||
" type=\"string\",\n",
|
||||
" ),\n",
|
||||
" AttributeInfo(\n",
|
||||
" name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"document_content_description = \"Brief summary of a movie\"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model=\"gpt-4o\", max_tokens=4069)\n",
|
||||
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||
" llm, vectara, document_content_description, metadata_field_info, verbose=True\n",
|
||||
"from langchain_vectara.vectorstores import (\n",
|
||||
" CorpusConfig,\n",
|
||||
" SearchConfig,\n",
|
||||
" VectaraQueryConfig,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"config = VectaraQueryConfig(\n",
|
||||
" search=SearchConfig(corpora=[CorpusConfig(corpus_key=corpus_key)]),\n",
|
||||
" generation=None,\n",
|
||||
" intelligent_query_rewriting=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -205,116 +203,31 @@
|
||||
"id": "ea9df8d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Self-retrieval Queries\n",
|
||||
"And now we can try actually using our retriever!"
|
||||
"## Queries\n",
|
||||
"And now we can try actually using our vectara_queries method!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 8,
|
||||
"id": "38a126e9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'lang': 'eng', 'offset': '0', 'len': '82', 'year': '2019', 'director': 'Greta Gerwig', 'rating': '8.3', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'lang': 'eng', 'offset': '0', 'len': '76', 'year': '2010', 'director': 'Christopher Nolan', 'rating': '8.2', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a relevant query\n",
|
||||
"retriever.invoke(\"What are movies about scientists\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "fc3f1e6e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a filter\n",
|
||||
"retriever.invoke(\"I want to watch a movie rated higher than 8.5\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "b19d4da0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'lang': 'eng', 'offset': '0', 'len': '82', 'year': '2019', 'director': 'Greta Gerwig', 'rating': '8.3', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and a filter\n",
|
||||
"retriever.invoke(\"Has Greta Gerwig directed any movies about women\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "f900e40e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a composite filter\n",
|
||||
"retriever.invoke(\"What's a highly rated (above 8.5) science fiction film?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "12a51522",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
"[(Document(metadata={'year': 1995, 'genre': 'animated', 'source': 'langchain'}, page_content='Toys come alive and have a blast doing so'),\n",
|
||||
" 0.4141285717487335),\n",
|
||||
" (Document(metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}, page_content='Three men walk into the Zone, three men walk out of the Zone'),\n",
|
||||
" 0.4046250879764557),\n",
|
||||
" (Document(metadata={'year': 2010, 'director': 'Christopher Nolan', 'rating': 8.2, 'source': 'langchain'}, page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...'),\n",
|
||||
" 0.227469339966774),\n",
|
||||
" (Document(metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3, 'source': 'langchain'}, page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them'),\n",
|
||||
" 0.19208428263664246),\n",
|
||||
" (Document(metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction', 'source': 'langchain'}, page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose'),\n",
|
||||
" 0.1902722418308258),\n",
|
||||
" (Document(metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6, 'source': 'langchain'}, page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea'),\n",
|
||||
" 0.08151976019144058)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@@ -323,74 +236,107 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and composite filter\n",
|
||||
"retriever.invoke(\n",
|
||||
" \"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39bd1de1-b9fe-4a98-89da-58d8a7a6ae51",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Filter k\n",
|
||||
"\n",
|
||||
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
|
||||
"\n",
|
||||
"We can do this by passing `enable_limit=True` to the constructor."
|
||||
"# This example only specifies a relevant query\n",
|
||||
"vectara.vectara_query(\"What are movies about scientists\", config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||
" llm,\n",
|
||||
" vectara,\n",
|
||||
" document_content_description,\n",
|
||||
" metadata_field_info,\n",
|
||||
" enable_limit=True,\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "00e8baad-a9d7-4498-bd8d-ca41d0691386",
|
||||
"id": "fc3f1e6e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This is cool, we can include the number of results we would like to see in the query and the self retriever would correctly understand it. For example, let's look for "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'}),\n",
|
||||
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||
"[(Document(metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6, 'source': 'langchain'}, page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea'),\n",
|
||||
" 0.34279149770736694),\n",
|
||||
" (Document(metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}, page_content='Three men walk into the Zone, three men walk out of the Zone'),\n",
|
||||
" 0.242923304438591)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example only specifies a relevant query\n",
|
||||
"retriever.invoke(\"what are two movies with a rating above 8.5\")"
|
||||
"# This example only specifies a filter\n",
|
||||
"vectara.vectara_query(\"I want to watch a movie rated higher than 8.5\", config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "b19d4da0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[(Document(metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3, 'source': 'langchain'}, page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them'),\n",
|
||||
" 0.10141132771968842)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and a filter\n",
|
||||
"vectara.vectara_query(\"Has Greta Gerwig directed any movies about women\", config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "f900e40e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[(Document(metadata={'year': 1979, 'rating': 9.9, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}, page_content='Three men walk into the Zone, three men walk out of the Zone'),\n",
|
||||
" 0.9508692026138306)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a composite filter\n",
|
||||
"vectara.vectara_query(\"What's a highly rated (above 8.5) science fiction film?\", config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "12a51522",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[(Document(metadata={'year': 1995, 'genre': 'animated', 'source': 'langchain'}, page_content='Toys come alive and have a blast doing so'),\n",
|
||||
" 0.7290377616882324),\n",
|
||||
" (Document(metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction', 'source': 'langchain'}, page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose'),\n",
|
||||
" 0.4838160574436188)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This example specifies a query and composite filter\n",
|
||||
"vectara.vectara_query(\n",
|
||||
" \"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\",\n",
|
||||
" config,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -418,7 +364,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.8"
|
||||
"version": "3.12.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
425
docs/docs/integrations/retrievers/vectorize.ipynb
Normal file
@@ -0,0 +1,425 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "raw",
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Vectorize\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "zvHrM3wa7IE1"
|
||||
},
|
||||
"source": [
|
||||
"# VectorizeRetriever\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the LangChain Vectorize retriever.\n",
|
||||
"\n",
|
||||
"> [Vectorize](https://vectorize.io/) helps you build AI apps faster and with less hassle.\n",
|
||||
"> It automates data extraction, finds the best vectorization strategy using RAG evaluation,\n",
|
||||
"> and lets you quickly deploy real-time RAG pipelines for your unstructured data.\n",
|
||||
"> Your vector search indexes stay up-to-date, and it integrates with your existing vector database,\n",
|
||||
"> so you maintain full control of your data.\n",
|
||||
"> Vectorize handles the heavy lifting, freeing you to focus on building robust AI solutions without getting bogged down by data management.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "r-RswOO5o4K_"
|
||||
},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"In the following steps, we'll setup the Vectorize environment and create a RAG pipeline.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "FhvmvFKh4Rlh"
|
||||
},
|
||||
"source": [
|
||||
"### Create a Vectorize Account & Get Your Access Token\n",
|
||||
"\n",
|
||||
"Sign up for a free Vectorize account [here](https://platform.vectorize.io/)\n",
|
||||
"Generate an access token in the [Access Token](https://docs.vectorize.io/rag-pipelines/retrieval-endpoint#access-tokens) section\n",
|
||||
"Gather your organization ID. From the browser url, extract the UUID from the URL after /organization/"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "L2SULMfWpWFX"
|
||||
},
|
||||
"source": [
|
||||
"### Configure token and organization ID\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "BnF8KoDZpg2O"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"VECTORIZE_ORG_ID = getpass.getpass(\"Enter Vectorize organization ID: \")\n",
|
||||
"VECTORIZE_API_TOKEN = getpass.getpass(\"Enter Vectorize API Token: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"id": "JdZ5vlzjoDVr"
|
||||
},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This retriever lives in the `langchain-vectorize` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"id": "IJFmtvDLn5R3"
|
||||
},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": "!pip install -qU langchain-vectorize"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Oj10Moznpz67"
|
||||
},
|
||||
"source": "### Download a PDF file"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "eLbbTPytrgNw"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": "!wget \"https://raw.githubusercontent.com/vectorize-io/vectorize-clients/refs/tags/python-0.1.3/tests/python/tests/research.pdf\""
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "7g54J6awtshs"
|
||||
},
|
||||
"source": "### Initialize the vectorize client"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "9Fr4yz5CrFWP"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import vectorize_client as v\n",
|
||||
"\n",
|
||||
"api = v.ApiClient(v.Configuration(access_token=VECTORIZE_API_TOKEN))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "wPDoeqETxJrS"
|
||||
},
|
||||
"source": "### Create a File Upload Source Connector"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "9yEARIcFue5N"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import urllib3\n",
|
||||
"\n",
|
||||
"connectors_api = v.ConnectorsApi(api)\n",
|
||||
"response = connectors_api.create_source_connector(\n",
|
||||
" VECTORIZE_ORG_ID, [{\"type\": \"FILE_UPLOAD\", \"name\": \"From API\"}]\n",
|
||||
")\n",
|
||||
"source_connector_id = response.connectors[0].id"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "yU3lS6dpxZnQ"
|
||||
},
|
||||
"source": "### Upload the PDF file"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "OIiMIZ8ZxUYF"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"file_path = \"research.pdf\"\n",
|
||||
"\n",
|
||||
"http = urllib3.PoolManager()\n",
|
||||
"uploads_api = v.UploadsApi(api)\n",
|
||||
"metadata = {\"created-from-api\": True}\n",
|
||||
"\n",
|
||||
"upload_response = uploads_api.start_file_upload_to_connector(\n",
|
||||
" VECTORIZE_ORG_ID,\n",
|
||||
" source_connector_id,\n",
|
||||
" v.StartFileUploadToConnectorRequest(\n",
|
||||
" name=file_path.split(\"/\")[-1],\n",
|
||||
" content_type=\"application/pdf\",\n",
|
||||
" # add additional metadata that will be stored along with each chunk in the vector database\n",
|
||||
" metadata=json.dumps(metadata),\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"with open(file_path, \"rb\") as f:\n",
|
||||
" response = http.request(\n",
|
||||
" \"PUT\",\n",
|
||||
" upload_response.upload_url,\n",
|
||||
" body=f,\n",
|
||||
" headers={\n",
|
||||
" \"Content-Type\": \"application/pdf\",\n",
|
||||
" \"Content-Length\": str(os.path.getsize(file_path)),\n",
|
||||
" },\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"if response.status != 200:\n",
|
||||
" print(\"Upload failed: \", response.data)\n",
|
||||
"else:\n",
|
||||
" print(\"Upload successful\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "PdJJfOhfxiIo"
|
||||
},
|
||||
"source": "### Connect to the AI Platform and Vector Database"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "0ZSGhXJfxjBb"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ai_platforms = connectors_api.get_ai_platform_connectors(VECTORIZE_ORG_ID)\n",
|
||||
"builtin_ai_platform = [\n",
|
||||
" c.id for c in ai_platforms.ai_platform_connectors if c.type == \"VECTORIZE\"\n",
|
||||
"][0]\n",
|
||||
"\n",
|
||||
"vector_databases = connectors_api.get_destination_connectors(VECTORIZE_ORG_ID)\n",
|
||||
"builtin_vector_db = [\n",
|
||||
" c.id for c in vector_databases.destination_connectors if c.type == \"VECTORIZE\"\n",
|
||||
"][0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "JWoL-kqQxs5H"
|
||||
},
|
||||
"source": "### Configure and Deploy the Pipeline"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "hze9vJbQxvqA"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pipelines = v.PipelinesApi(api)\n",
|
||||
"response = pipelines.create_pipeline(\n",
|
||||
" VECTORIZE_ORG_ID,\n",
|
||||
" v.PipelineConfigurationSchema(\n",
|
||||
" source_connectors=[\n",
|
||||
" v.SourceConnectorSchema(\n",
|
||||
" id=source_connector_id, type=\"FILE_UPLOAD\", config={}\n",
|
||||
" )\n",
|
||||
" ],\n",
|
||||
" destination_connector=v.DestinationConnectorSchema(\n",
|
||||
" id=builtin_vector_db, type=\"VECTORIZE\", config={}\n",
|
||||
" ),\n",
|
||||
" ai_platform=v.AIPlatformSchema(\n",
|
||||
" id=builtin_ai_platform, type=\"VECTORIZE\", config={}\n",
|
||||
" ),\n",
|
||||
" pipeline_name=\"My Pipeline From API\",\n",
|
||||
" schedule=v.ScheduleSchema(type=\"manual\"),\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"pipeline_id = response.data.id"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"### Configure tracing (optional)\n",
|
||||
"\n",
|
||||
"If you want to get automated tracing from individual queries, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "5ULion9wyj6T"
|
||||
},
|
||||
"source": "## Instantiation"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "9D-QfiW7yoe0"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_vectorize.retrievers import VectorizeRetriever\n",
|
||||
"\n",
|
||||
"retriever = VectorizeRetriever(\n",
|
||||
" api_token=VECTORIZE_API_TOKEN,\n",
|
||||
" organization=VECTORIZE_ORG_ID,\n",
|
||||
" pipeline_id=pipeline_id,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": [
|
||||
"query = \"Apple Shareholders equity\"\n",
|
||||
"retriever.invoke(query, num_results=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Use within a chain\n",
|
||||
"\n",
|
||||
"Like other retrievers, VectorizeRetriever can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"\"\"Answer the question based only on the context provided.\n",
|
||||
"\n",
|
||||
"Context: {context}\n",
|
||||
"\n",
|
||||
"Question: {question}\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "code",
|
||||
"outputs": [],
|
||||
"execution_count": null,
|
||||
"source": "chain.invoke(\"...\")"
|
||||
},
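{
"metadata": {},
"cell_type": "markdown",
"source": [
"For example, a concrete call might look like this (the question below is hypothetical; the answer depends on the `research.pdf` content indexed above):"
]
},
{
"metadata": {},
"cell_type": "code",
"outputs": [],
"execution_count": null,
"source": [
"# Hypothetical question; the chain answers from the pipeline built above\n",
"print(chain.invoke(\"What is Apple's total shareholders' equity?\"))"
]
},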
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all VectorizeRetriever features and configurations head to the [API reference](https://python.langchain.com/api_reference/vectorize/langchain_vectorize.retrievers.VectorizeRetriever.html)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -1,262 +1,262 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "62727aaa-bcff-4087-891c-e539f824ee1f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Wikipedia\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d62a16c1-10de-4f99-b392-c4ad2e6123a1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# WikipediaRetriever\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
">[Wikipedia](https://wikipedia.org/) is a multilingual free online encyclopedia written and maintained by a community of volunteers, known as Wikipedians, through open collaboration and using a wiki-based editing system called MediaWiki. `Wikipedia` is the largest and most-read reference work in history.\n",
|
||||
"\n",
|
||||
"This notebook shows how to retrieve wiki pages from `wikipedia.org` into the [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) format that is used downstream.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import {ItemTable} from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"external_retrievers\" item=\"WikipediaRetriever\" />"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eb7d377c-168b-40e8-bd61-af6a4fb1b44f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1bbc6013-2617-4f7e-9d8b-7453d09315c0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "51489529-5dcd-4b86-bda6-de0a39d8ffd1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The integration lives in the `langchain-community` package. We also need to install the `wikipedia` python package itself."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1a737220",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae622ac6-d18a-4754-a4bd-d30a078c19b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6c15470b-a16b-4e0d-bc6a-6998bafbb5a4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can instantiate our retriever:\n",
|
||||
"\n",
|
||||
"`WikipediaRetriever` parameters include:\n",
|
||||
"- optional `lang`: default=\"en\". Use it to search in a specific language part of Wikipedia\n",
|
||||
"- optional `load_max_docs`: default=100. Use it to limit number of downloaded documents. It takes time to download all 100 documents, so use a small number for experiments. There is a hard limit of 300 for now.\n",
|
||||
"- optional `load_all_available_meta`: default=False. By default only the most important fields downloaded: `Published` (date when document was published/last updated), `title`, `Summary`. If True, other fields also downloaded.\n",
|
||||
"\n",
|
||||
"`get_relevant_documents()` has one argument, `query`: free text which used to find documents in Wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "b78f0cd0-ffea-4fe3-9d1d-54639c4ef1ff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.retrievers import WikipediaRetriever\n",
|
||||
"\n",
|
||||
"retriever = WikipediaRetriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12aead36-7b97-4d9c-82e7-ec644a3127f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "54a76605-6b1e-44bf-b8a2-7d48119290c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = retriever.invoke(\"TOKYO GHOUL\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "65ada2b7-3507-4dcb-9982-5f8f4e97a2e1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokyo Ghoul (Japanese: 東京喰種(トーキョーグール), Hepburn: Tōkyō Gūru) is a Japanese dark fantasy manga series written and illustrated by Sui Ishida. It was serialized in Shueisha's seinen manga magazine Weekly Young Jump from September 2011 to September 2014, with its chapters collected in 14 tankōbon volumes. The story is set in an alternate version of Tokyo where humans coexist with ghouls, beings who loo\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content[:400])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae3c3d16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within a chain\n",
|
||||
"Like other retrievers, `WikipediaRetriever` can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4bd3d268-eb8c-46e9-930a-18f5e2a50008",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9b52bc65-1b2e-4c30-ab43-41eaa5bf79c3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"\"\"\n",
|
||||
" Answer the question based only on the context provided.\n",
|
||||
" Context: {context}\n",
|
||||
" Question: {question}\n",
|
||||
" \"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d268905-3b19-4338-ac10-223c0fe4d5e4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The main character in Tokyo Ghoul is Ken Kaneki, who transforms into a ghoul after receiving an organ transplant from a ghoul named Rize.'"
|
||||
"cell_type": "raw",
|
||||
"id": "62727aaa-bcff-4087-891c-e539f824ee1f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Wikipedia\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d62a16c1-10de-4f99-b392-c4ad2e6123a1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# WikipediaRetriever\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
">[Wikipedia](https://wikipedia.org/) is a multilingual free online encyclopedia written and maintained by a community of volunteers, known as Wikipedians, through open collaboration and using a wiki-based editing system called MediaWiki. `Wikipedia` is the largest and most-read reference work in history.\n",
|
||||
"\n",
|
||||
"This notebook shows how to retrieve wiki pages from `wikipedia.org` into the [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) format that is used downstream.\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import {ItemTable} from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"external_retrievers\" item=\"WikipediaRetriever\" />"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eb7d377c-168b-40e8-bd61-af6a4fb1b44f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"To enable automated tracing of individual tools, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1bbc6013-2617-4f7e-9d8b-7453d09315c0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "51489529-5dcd-4b86-bda6-de0a39d8ffd1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The integration lives in the `langchain-community` package. We also need to install the `wikipedia` python package itself."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1a737220",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_community wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae622ac6-d18a-4754-a4bd-d30a078c19b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6c15470b-a16b-4e0d-bc6a-6998bafbb5a4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now we can instantiate our retriever:\n",
|
||||
"\n",
|
||||
"`WikipediaRetriever` parameters include:\n",
|
||||
"- optional `lang`: default=\"en\". Use it to search in a specific language part of Wikipedia\n",
|
||||
"- optional `load_max_docs`: default=100. Use it to limit number of downloaded documents. It takes time to download all 100 documents, so use a small number for experiments. There is a hard limit of 300 for now.\n",
|
||||
"- optional `load_all_available_meta`: default=False. By default only the most important fields downloaded: `Published` (date when document was published/last updated), `title`, `Summary`. If True, other fields also downloaded.\n",
|
||||
"\n",
|
||||
"`get_relevant_documents()` has one argument, `query`: free text which used to find documents in Wikipedia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "b78f0cd0-ffea-4fe3-9d1d-54639c4ef1ff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.retrievers import WikipediaRetriever\n",
|
||||
"\n",
|
||||
"retriever = WikipediaRetriever()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "12aead36-7b97-4d9c-82e7-ec644a3127f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "54a76605-6b1e-44bf-b8a2-7d48119290c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = retriever.invoke(\"TOKYO GHOUL\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "65ada2b7-3507-4dcb-9982-5f8f4e97a2e1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tokyo Ghoul (Japanese: 東京喰種(トーキョーグール), Hepburn: Tōkyō Gūru) is a Japanese dark fantasy manga series written and illustrated by Sui Ishida. It was serialized in Shueisha's seinen manga magazine Weekly Young Jump from September 2011 to September 2014, with its chapters collected in 14 tankōbon volumes. The story is set in an alternate version of Tokyo where humans coexist with ghouls, beings who loo\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(docs[0].page_content[:400])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ae3c3d16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within a chain\n",
|
||||
"Like other retrievers, `WikipediaRetriever` can be incorporated into LLM applications via [chains](/docs/how_to/sequence/).\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4bd3d268-eb8c-46e9-930a-18f5e2a50008",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9b52bc65-1b2e-4c30-ab43-41eaa5bf79c3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"\"\"\n",
|
||||
" Answer the question based only on the context provided.\n",
|
||||
" Context: {context}\n",
|
||||
" Question: {question}\n",
|
||||
" \"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d268905-3b19-4338-ac10-223c0fe4d5e4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'The main character in Tokyo Ghoul is Ken Kaneki, who transforms into a ghoul after receiving an organ transplant from a ghoul named Rize.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\n",
|
||||
" \"Who is the main character in `Tokyo Ghoul` and does he transform into a ghoul?\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "236bbafb-ebd4-4165-9b8f-d47605f6eef3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `WikipediaRetriever` features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/retrievers/langchain_community.retrievers.wikipedia.WikipediaRetriever.html#langchain-community-retrievers-wikipedia-wikipediaretriever)."
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(\n",
|
||||
" \"Who is the main character in `Tokyo Ghoul` and does he transform into a ghoul?\"\n",
|
||||
")"
|
||||
]
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "236bbafb-ebd4-4165-9b8f-d47605f6eef3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `WikipediaRetriever` features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/retrievers/langchain_community.retrievers.wikipedia.WikipediaRetriever.html#langchain-community-retrievers-wikipedia-wikipediaretriever)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -198,7 +198,7 @@
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" query: str: The search query to be used. Try to keep this specific and short, e.g. a specific topic or author name\n",
|
||||
" itemType: Optional. Type of item to search for (e.g. \"book\" or \"journalArticle\"). Multiple types can be passed as a string seperated by \"||\", e.g. \"book || journalArticle\". Defaults to all types.\n",
|
||||
" itemType: Optional. Type of item to search for (e.g. \"book\" or \"journalArticle\"). Multiple types can be passed as a string separated by \"||\", e.g. \"book || journalArticle\". Defaults to all types.\n",
|
||||
" tag: Optional. For searching over tags attached to library items. If documents tagged with multiple tags are to be retrieved, pass them as a list. If documents with any of the tags are to be retrieved, pass them as a string separated by \"||\", e.g. \"tag1 || tag2\"\n",
|
||||
" qmode: Search mode to use. Changes what the query searches over. \"everything\" includes full-text content. \"titleCreatorYear\" to search over title, authors and year. Defaults to \"everything\".\n",
|
||||
" since: Return only objects modified after the specified library version. Defaults to return everything.\n",
|
||||
|
||||
@@ -1,272 +1,270 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AI21\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AI21Embeddings\n",
|
||||
"\n",
|
||||
":::caution This service is deprecated. :::\n",
|
||||
"\n",
|
||||
"This will help you get started with AI21 embedding models using LangChain. For detailed documentation on `AI21Embeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/ai21/embeddings/langchain_ai21.embeddings.AI21Embeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"AI21\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AI21 embedding models you'll need to create an AI21 account, get an API key, and install the `langchain-ai21` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [https://docs.ai21.com/](https://docs.ai21.com/) to sign up to AI21 and generate an API key. Once you've done this set the `AI21_API_KEY` environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"AI21_API_KEY\"):\n",
|
||||
" os.environ[\"AI21_API_KEY\"] = getpass.getpass(\"Enter your AI21 API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AI21 integration lives in the `langchain-ai21` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-ai21"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21Embeddings\n",
|
||||
"\n",
|
||||
"embeddings = AI21Embeddings(\n",
|
||||
" # Can optionally increase or decrease the batch_size\n",
|
||||
" # to improve latency.\n",
|
||||
" # Use larger batch sizes with smaller documents, and\n",
|
||||
" # smaller batch sizes with larger documents.\n",
|
||||
" # batch_size=256,\n",
|
||||
")"
|
||||
]
|
||||
},
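{
"cell_type": "markdown",
"id": "e1f2a3b4",
"metadata": {},
"source": [
"For example, when embedding many short documents you might raise the batch size explicitly (a sketch; `256` is an illustrative value for the `batch_size` parameter shown in the comment above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a5b6c7d8",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative: larger batches tend to suit many short documents\n",
"embeddings_small_docs = AI21Embeddings(batch_size=256)"
]
},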
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AI21\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.01913362182676792, 0.004960147198289633, -0.01582135073840618, -0.042474791407585144, 0.040200788\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AI21Embeddings\n",
|
||||
"\n",
|
||||
":::caution This service is deprecated. :::\n",
|
||||
"\n",
|
||||
"This will help you get started with AI21 embedding models using LangChain. For detailed documentation on `AI21Embeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/ai21/embeddings/langchain_ai21.embeddings.AI21Embeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"AI21\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AI21 embedding models you'll need to create an AI21 account, get an API key, and install the `langchain-ai21` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [https://docs.ai21.com/](https://docs.ai21.com/) to sign up to AI21 and generate an API key. Once you've done this set the `AI21_API_KEY` environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.03029559925198555, 0.002908500377088785, -0.02700909972190857, -0.04616579785943031, 0.0382771529\n",
|
||||
"[0.018214847892522812, 0.011460083536803722, -0.03329407051205635, -0.04951060563325882, 0.032756105\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"AI21_API_KEY\"):\n",
|
||||
" os.environ[\"AI21_API_KEY\"] = getpass.getpass(\"Enter your AI21 API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AI21 integration lives in the `langchain-ai21` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-ai21"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21Embeddings\n",
|
||||
"\n",
|
||||
"embeddings = AI21Embeddings(\n",
|
||||
" # Can optionally increase or decrease the batch_size\n",
|
||||
" # to improve latency.\n",
|
||||
" # Use larger batch sizes with smaller documents, and\n",
|
||||
" # smaller batch sizes with larger documents.\n",
|
||||
" # batch_size=256,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.01913362182676792, 0.004960147198289633, -0.01582135073840618, -0.042474791407585144, 0.040200788\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.03029559925198555, 0.002908500377088785, -0.02700909972190857, -0.04616579785943031, 0.0382771529\n",
|
||||
"[0.018214847892522812, 0.011460083536803722, -0.03329407051205635, -0.04951060563325882, 0.032756105\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
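{
"cell_type": "markdown",
"id": "f0e1d2c3",
"metadata": {},
"source": [
"The vectors returned by `embed_query` and `embed_documents` are plain lists of floats, so you can compare them directly. Below is a minimal sketch (assuming only `numpy` as an extra dependency) that uses cosine similarity to check which of the two documents above sits closer to a query:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3d4e5f6",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"\n",
"def cosine_similarity(a, b):\n",
"    # Cosine similarity = dot(a, b) / (|a| * |b|); values near 1 mean more similar.\n",
"    a, b = np.asarray(a), np.asarray(b)\n",
"    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))\n",
"\n",
"\n",
"# `text`, `text2`, `two_vectors`, and `embeddings` come from the cells above.\n",
"query_vector = embeddings.embed_query(\"What is LangChain?\")\n",
"for doc_text, doc_vector in zip([text, text2], two_vectors):\n",
"    print(f\"{cosine_similarity(query_vector, doc_vector):.3f}  {doc_text[:40]}\")"
]
},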
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `AI21Embeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/ai21/embeddings/langchain_ai21.embeddings.AI21Embeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
"nbformat": 4,
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,280 +1,278 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AzureOpenAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AzureOpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with AzureOpenAI embedding models using LangChain. For detailed documentation on `AzureOpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/openai/embeddings/langchain_openai.embeddings.azure.AzureOpenAIEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"AzureOpenAI\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AzureOpenAI embedding models you'll need to create an Azure account, get an API key, and install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"You’ll need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following this [guide](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n",
|
||||
"\n",
|
||||
"Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the “Keys and Endpoint” section of your instance.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"AZURE_OPENAI_ENDPOINT=<YOUR API ENDPOINT>\n",
|
||||
"AZURE_OPENAI_API_KEY=<YOUR_KEY>\n",
|
||||
"AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"AZURE_OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your AzureOpenAI API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AzureOpenAI integration lives in the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import AzureOpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = AzureOpenAIEmbeddings(\n",
|
||||
" model=\"text-embedding-3-large\",\n",
|
||||
" # dimensions: Optional[int] = None, # Can specify dimensions with new text-embedding-3 models\n",
|
||||
" # azure_endpoint=\"https://<your-endpoint>.openai.azure.com/\", If not provided, will read env variable AZURE_OPENAI_ENDPOINT\n",
|
||||
" # api_key=... # Can provide an API key directly. If missing read env variable AZURE_OPENAI_API_KEY\n",
|
||||
" # openai_api_version=..., # If not provided, will read env variable AZURE_OPENAI_API_VERSION\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: AzureOpenAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.0011676070280373096, 0.007125577889382839, -0.014674457721412182, -0.034061674028635025, 0.01128\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# AzureOpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with AzureOpenAI embedding models using LangChain. For detailed documentation on `AzureOpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/openai/embeddings/langchain_openai.embeddings.azure.AzureOpenAIEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"AzureOpenAI\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access AzureOpenAI embedding models you'll need to create an Azure account, get an API key, and install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"You’ll need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following this [guide](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n",
|
||||
"\n",
|
||||
"Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the “Keys and Endpoint” section of your instance.\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"AZURE_OPENAI_ENDPOINT=<YOUR API ENDPOINT>\n",
|
||||
"AZURE_OPENAI_API_KEY=<YOUR_KEY>\n",
|
||||
"AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.0011966148158535361, 0.007160289213061333, -0.014659193344414234, -0.03403077274560928, 0.011280\n",
|
||||
"[-0.005595256108790636, 0.016757294535636902, -0.011055258102715015, -0.031094247475266457, -0.00363\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"AZURE_OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your AzureOpenAI API key: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain AzureOpenAI integration lives in the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import AzureOpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = AzureOpenAIEmbeddings(\n",
|
||||
" model=\"text-embedding-3-large\",\n",
|
||||
" # dimensions: Optional[int] = None, # Can specify dimensions with new text-embedding-3 models\n",
|
||||
" # azure_endpoint=\"https://<your-endpoint>.openai.azure.com/\", If not provided, will read env variable AZURE_OPENAI_ENDPOINT\n",
|
||||
" # api_key=... # Can provide an API key directly. If missing read env variable AZURE_OPENAI_API_KEY\n",
|
||||
" # openai_api_version=..., # If not provided, will read env variable AZURE_OPENAI_API_VERSION\n",
|
||||
")"
|
||||
]
|
||||
},
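{
"cell_type": "markdown",
"id": "b7c8d9e0",
"metadata": {},
"source": [
"The commented-out `dimensions` parameter above deserves a note: `text-embedding-3` models can return shortened vectors, trading a little quality for cheaper storage and faster similarity search. A minimal sketch, assuming the same Azure deployment and environment variables as above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0f1a2b3",
"metadata": {},
"outputs": [],
"source": [
"# Request 256-dimensional vectors instead of the model default (assumes the\n",
"# same Azure deployment and environment variables as the cell above).\n",
"embeddings_256 = AzureOpenAIEmbeddings(\n",
"    model=\"text-embedding-3-large\",\n",
"    dimensions=256,\n",
")\n",
"\n",
"print(len(embeddings_256.embed_query(\"hello world\")))  # expected: 256"
]
},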
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.0011676070280373096, 0.007125577889382839, -0.014674457721412182, -0.034061674028635025, 0.01128\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.0011966148158535361, 0.007160289213061333, -0.014659193344414234, -0.03403077274560928, 0.011280\n",
|
||||
"[-0.005595256108790636, 0.016757294535636902, -0.011055258102715015, -0.031094247475266457, -0.00363\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `AzureOpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/openai/embeddings/langchain_openai.embeddings.azure.AzureOpenAIEmbeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
"nbformat": 4,
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,267 +1,265 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Cohere\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# CohereEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Cohere embedding models using LangChain. For detailed documentation on `CohereEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/cohere/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Cohere\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Cohere embedding models you'll need to create a/an Cohere account, get an API key, and install the `langchain-cohere` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Head to [cohere.com](https://cohere.com) to sign up to Cohere and generate an API key. Once you’ve done this set the COHERE_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"COHERE_API_KEY\"):\n",
|
||||
" os.environ[\"COHERE_API_KEY\"] = getpass.getpass(\"Enter your Cohere API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Cohere integration lives in the `langchain-cohere` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-cohere"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_cohere import CohereEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = CohereEmbeddings(\n",
|
||||
" model=\"embed-english-v3.0\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Cohere\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.022979736, -0.030212402, -0.08886719, -0.08569336, 0.007030487, -0.0010671616, -0.033813477, 0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# CohereEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Cohere embedding models using LangChain. For detailed documentation on `CohereEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/cohere/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Cohere\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Cohere embedding models you'll need to create a/an Cohere account, get an API key, and install the `langchain-cohere` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Head to [cohere.com](https://cohere.com) to sign up to Cohere and generate an API key. Once you’ve done this set the COHERE_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.028869629, -0.030410767, -0.099121094, -0.07116699, -0.012748718, -0.0059432983, -0.04360962, 0.\n",
|
||||
"[-0.047332764, -0.049957275, -0.07458496, -0.034332275, -0.057922363, -0.0112838745, -0.06994629, 0.\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"COHERE_API_KEY\"):\n",
|
||||
" os.environ[\"COHERE_API_KEY\"] = getpass.getpass(\"Enter your Cohere API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Cohere integration lives in the `langchain-cohere` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-cohere"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_cohere import CohereEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = CohereEmbeddings(\n",
|
||||
" model=\"embed-english-v3.0\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.022979736, -0.030212402, -0.08886719, -0.08569336, 0.007030487, -0.0010671616, -0.033813477, 0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.028869629, -0.030410767, -0.099121094, -0.07116699, -0.012748718, -0.0059432983, -0.04360962, 0.\n",
|
||||
"[-0.047332764, -0.049957275, -0.07458496, -0.034332275, -0.057922363, -0.0112838745, -0.06994629, 0.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
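{
"cell_type": "markdown",
"id": "a4b5c6d7",
"metadata": {},
"source": [
"Because `embed_documents` returns one vector per input, you can rank a small set of documents against a query without standing up a vector store. A minimal sketch, assuming only `numpy` as an extra dependency:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d7e8f9a0",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# `text`, `text2`, and `embeddings` come from the cells above.\n",
"docs = [text, text2]\n",
"doc_vectors = np.asarray(embeddings.embed_documents(docs))\n",
"query_vector = np.asarray(embeddings.embed_query(\"What is LangChain?\"))\n",
"\n",
"# Cosine-similarity scores for each document against the query, best first.\n",
"scores = doc_vectors @ query_vector / (\n",
"    np.linalg.norm(doc_vectors, axis=1) * np.linalg.norm(query_vector)\n",
")\n",
"for i in np.argsort(-scores):\n",
"    print(f\"{scores[i]:.3f}  {docs[i]}\")"
]
},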
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `CohereEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/cohere/embeddings/langchain_cohere.embeddings.CohereEmbeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
"nbformat": 4,
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,267 +1,265 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Fireworks\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Fireworks embedding models using LangChain. For detailed documentation on `FireworksEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/fireworks/embeddings/langchain_fireworks.embeddings.FireworksEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Fireworks\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Fireworks embedding models you'll need to create a Fireworks account, get an API key, and install the `langchain-fireworks` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [fireworks.ai](https://fireworks.ai/) to sign up to Fireworks and generate an API key. Once you’ve done this set the FIREWORKS_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"FIREWORKS_API_KEY\"):\n",
|
||||
" os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Enter your Fireworks API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Fireworks integration lives in the `langchain-fireworks` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-fireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_fireworks import FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = FireworksEmbeddings(\n",
|
||||
" model=\"nomic-ai/nomic-embed-text-v1.5\",\n",
|
||||
")"
|
||||
]
|
||||
},
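{
"cell_type": "markdown",
"id": "c9d0e1f2",
"metadata": {},
"source": [
"Different embedding models return vectors of different lengths, and most vector stores need that dimensionality fixed up front. As a quick sanity check (a minimal sketch using only the `embeddings` object defined above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2a3b4c5",
"metadata": {},
"outputs": [],
"source": [
"# Embed a short probe string and inspect the vector length; any string works.\n",
"probe_vector = embeddings.embed_query(\"hello world\")\n",
"print(f\"{len(probe_vector)} dimensions\")"
]
},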
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Fireworks\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.01666259765625, 0.011688232421875, -0.1181640625, -0.10205078125, 0.05438232421875, -0.0890502929\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Fireworks embedding models using LangChain. For detailed documentation on `FireworksEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/fireworks/embeddings/langchain_fireworks.embeddings.FireworksEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Fireworks\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Fireworks embedding models you'll need to create a Fireworks account, get an API key, and install the `langchain-fireworks` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [fireworks.ai](https://fireworks.ai/) to sign up to Fireworks and generate an API key. Once you’ve done this set the FIREWORKS_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.016632080078125, 0.01165008544921875, -0.1181640625, -0.10186767578125, 0.05438232421875, -0.0890\n",
|
||||
"[-0.02667236328125, 0.036651611328125, -0.1630859375, -0.0904541015625, -0.022430419921875, -0.09545\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"FIREWORKS_API_KEY\"):\n",
|
||||
" os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Enter your Fireworks API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Fireworks integration lives in the `langchain-fireworks` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-fireworks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_fireworks import FireworksEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = FireworksEmbeddings(\n",
|
||||
" model=\"nomic-ai/nomic-embed-text-v1.5\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.01666259765625, 0.011688232421875, -0.1181640625, -0.10205078125, 0.05438232421875, -0.0890502929\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.016632080078125, 0.01165008544921875, -0.1181640625, -0.10186767578125, 0.05438232421875, -0.0890\n",
|
||||
"[-0.02667236328125, 0.036651611328125, -0.1630859375, -0.0904541015625, -0.022430419921875, -0.09545\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3fba556a-b53d-431c-b0c6-ffb1e2fa5a6e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `FireworksEmbeddings` features and configurations head to the [API reference](https://python.langchain.com/api_reference/fireworks/embeddings/langchain_fireworks.embeddings.FireworksEmbeddings.html)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3fba556a-b53d-431c-b0c6-ffb1e2fa5a6e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `FireworksEmbeddings` features and configurations head to the [API reference](https://python.langchain.com/api_reference/fireworks/embeddings/langchain_fireworks.embeddings.FireworksEmbeddings.html)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
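The Direct Usage cells above return raw vectors; ranking documents against a query is then just a cosine-similarity computation. Below is a minimal sketch of that scoring step, assuming the `embeddings` object from the notebook above; `numpy` and the local variable names are illustrative additions, not part of the `langchain-fireworks` API.

```python
# Minimal sketch: rank documents against a query by cosine similarity,
# mirroring what the in-memory retriever does internally.
# Assumes `embeddings` is the FireworksEmbeddings instance from the notebook.
import numpy as np

docs = [
    "LangChain is the framework for building context-aware reasoning applications",
    "LangGraph is a library for building stateful, multi-actor applications with LLMs",
]

doc_vecs = np.array(embeddings.embed_documents(docs))
query_vec = np.array(embeddings.embed_query("What is LangChain?"))

# Cosine similarity between the query and each document vector
scores = doc_vecs @ query_vec / (
    np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(query_vec)
)
for doc, score in sorted(zip(docs, scores), key=lambda p: -p[1]):
    print(f"{score:.3f}  {doc}")
```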
@@ -1,323 +1,321 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Google Vertex AI\n",
    "keywords: [Vertex AI, vertexai, Google Cloud, embeddings]\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9a3d6f34",
   "metadata": {},
   "source": [
    "# Google Vertex AI Embeddings\n",
    "\n",
    "This will help you get started with Google Vertex AI Embeddings models using LangChain. For detailed documentation on `Google Vertex AI Embeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/google_vertexai/embeddings/langchain_google_vertexai.embeddings.VertexAIEmbeddings.html).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Provider | Package |\n",
    "|:--------:|:-------:|\n",
    "| [Google](https://python.langchain.com/docs/integrations/providers/google/) | [langchain-google-vertexai](https://python.langchain.com/api_reference/google_vertexai/embeddings/langchain_google_vertexai.embeddings.VertexAIEmbeddings.html) |\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access Google Vertex AI Embeddings models you'll need to\n",
    "- Create a Google Cloud account\n",
    "- Install the `langchain-google-vertexai` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "Head to [Google Cloud](https://cloud.google.com/free/) to sign up and create an account. Once you've done this, set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n",
    "\n",
    "For more information, see:\n",
    "\n",
    "https://cloud.google.com/docs/authentication/application-default-credentials#GAC\n",
    "https://googleapis.dev/python/google-auth/latest/reference/google.auth.html#module-google.auth"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "caaba519-3476-423b-a5e4-d99a10929506",
   "metadata": {},
   "source": [
    "**Optional: Authenticate your notebook environment (Colab only)**\n",
    "\n",
    "If you're running this notebook on Google Colab, run the cell below to authenticate your environment."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b0770000-3667-439b-8c46-acc5af7c8e40",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "\n",
    "if \"google.colab\" in sys.modules:\n",
    "    from google.colab import auth\n",
    "\n",
    "    auth.authenticate_user()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9fbd4a33-2480-4ad1-8d56-aec730b3662b",
   "metadata": {},
   "source": [
    "**Set Google Cloud project information and initialize Vertex AI SDK**\n",
    "\n",
    "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n",
    "\n",
    "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "36521c2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "PROJECT_ID = \"[your-project-id]\"  # @param {type:\"string\"}\n",
    "LOCATION = \"us-central1\"  # @param {type:\"string\"}\n",
    "\n",
    "import vertexai\n",
    "\n",
    "vertexai.init(project=PROJECT_ID, location=LOCATION)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c84fb993",
   "metadata": {},
   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39a4953b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9664366",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain Google Vertex AI Embeddings integration lives in the `langchain-google-vertexai` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "64853226",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain-google-vertexai"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45dd1724",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate embeddings:\n",
    ">Check the list of [Supported Models](https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-text-embeddings#supported-models)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "9ea7a09b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_google_vertexai import VertexAIEmbeddings\n",
    "\n",
    "# Initialize a specific embeddings model version\n",
    "embeddings = VertexAIEmbeddings(model_name=\"text-embedding-004\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "77d271b6",
   "metadata": {},
   "source": [
    "## Indexing and Retrieval\n",
    "\n",
    "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
    "\n",
    "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d817716b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'LangChain is the framework for building context-aware reasoning applications'"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Create a vector store with a sample text\n",
    "from langchain_core.vectorstores import InMemoryVectorStore\n",
    "\n",
    "text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
    "\n",
    "vectorstore = InMemoryVectorStore.from_texts(\n",
    "    [text],\n",
    "    embedding=embeddings,\n",
    ")\n",
    "\n",
    "# Use the vectorstore as a retriever\n",
    "retriever = vectorstore.as_retriever()\n",
    "\n",
    "# Retrieve the most similar text\n",
    "retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
    "\n",
    "# Show the retrieved document's content\n",
    "retrieved_documents[0].page_content"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e02b9855",
   "metadata": {},
   "source": [
    "## Direct Usage\n",
    "\n",
    "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
    "\n",
    "You can directly call these methods to get embeddings for your own use cases.\n",
    "\n",
    "### Embed single texts\n",
    "\n",
    "You can embed single texts or documents with `embed_query`:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "0d2befcd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-0.02831101417541504, 0.022063178941607475, -0.07454229146242142, 0.006448323838412762, 0.001955120\n"
     ]
    }
   ],
   "source": [
    "single_vector = embeddings.embed_query(text)\n",
    "print(str(single_vector)[:100])  # Show the first 100 characters of the vector"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1b5a7d03",
   "metadata": {},
   "source": [
    "### Embed multiple texts\n",
    "\n",
    "You can embed multiple texts with `embed_documents`:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "2f4d6e97",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-0.01092718355357647, 0.01213780976831913, -0.05650627985596657, 0.006737854331731796, 0.0085973171\n",
      "[0.010135706514120102, 0.01234869472682476, -0.07284046709537506, 0.00027134662377648056, 0.01546290\n"
     ]
    }
   ],
   "source": [
    "text2 = (\n",
    "    \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
    ")\n",
    "two_vectors = embeddings.embed_documents([text, text2])\n",
    "for vector in two_vectors:\n",
    "    print(str(vector)[:100])  # Show the first 100 characters of the vector"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "98785c12",
   "metadata": {},
   "source": [
    "## API Reference\n",
    "\n",
    "For detailed documentation on `Google Vertex AI Embeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/google_vertexai/embeddings/langchain_google_vertexai.embeddings.VertexAIEmbeddings.html)."
   ]
  }
 ],
 "metadata": {
  "environment": {
   "kernel": "python310",
   "name": "tf2-gpu.2-6.m104",
   "type": "gcloud",
   "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-6:m104"
  },
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
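The retriever built above uses default settings; `as_retriever` also accepts search parameters. A small sketch, assuming the `vectorstore` from the notebook above (the `search_type` and `search_kwargs` values shown are illustrative choices, not part of this diff):

```python
# Minimal sketch: configure how many results the retriever returns.
# Assumes `vectorstore` is the InMemoryVectorStore built in the notebook.
retriever = vectorstore.as_retriever(
    search_type="similarity",   # default scoring; "mmr" is also supported
    search_kwargs={"k": 1},     # return only the single best match
)
docs = retriever.invoke("What is LangChain?")
print(docs[0].page_content)
```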
@@ -1,266 +1,264 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: MistralAI\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9a3d6f34",
   "metadata": {},
   "source": [
    "# MistralAIEmbeddings\n",
    "\n",
    "This will help you get started with MistralAI embedding models using LangChain. For detailed documentation on `MistralAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/mistralai/embeddings/langchain_mistralai.embeddings.MistralAIEmbeddings.html).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "import { ItemTable } from \"@theme/FeatureTables\";\n",
    "\n",
    "<ItemTable category=\"text_embedding\" item=\"MistralAI\" />\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access MistralAI embedding models you'll need to create a MistralAI account, get an API key, and install the `langchain-mistralai` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "Head to [https://console.mistral.ai/](https://console.mistral.ai/) to sign up for MistralAI and generate an API key. Once you've done this, set the `MISTRALAI_API_KEY` environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "36521c2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"MISTRALAI_API_KEY\"):\n",
    "    os.environ[\"MISTRALAI_API_KEY\"] = getpass.getpass(\"Enter your MistralAI API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c84fb993",
   "metadata": {},
   "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "39a4953b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9664366",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain MistralAI integration lives in the `langchain-mistralai` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "64853226",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain-mistralai"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45dd1724",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate embeddings:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9ea7a09b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_mistralai import MistralAIEmbeddings\n",
    "\n",
    "embeddings = MistralAIEmbeddings(\n",
    "    model=\"mistral-embed\",\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "77d271b6",
   "metadata": {},
   "source": [
    "## Indexing and Retrieval\n",
    "\n",
    "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
    "\n",
    "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d817716b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'LangChain is the framework for building context-aware reasoning applications'"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Create a vector store with a sample text\n",
    "from langchain_core.vectorstores import InMemoryVectorStore\n",
    "\n",
    "text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
    "\n",
    "vectorstore = InMemoryVectorStore.from_texts(\n",
    "    [text],\n",
    "    embedding=embeddings,\n",
    ")\n",
    "\n",
    "# Use the vectorstore as a retriever\n",
    "retriever = vectorstore.as_retriever()\n",
    "\n",
    "# Retrieve the most similar text\n",
    "retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
    "\n",
    "# Show the retrieved document's content\n",
    "retrieved_documents[0].page_content"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e02b9855",
   "metadata": {},
   "source": [
    "## Direct Usage\n",
    "\n",
    "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
    "\n",
    "You can directly call these methods to get embeddings for your own use cases.\n",
    "\n",
    "### Embed single texts\n",
    "\n",
    "You can embed single texts or documents with `embed_query`:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0d2befcd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-0.04443359375, 0.01885986328125, 0.018035888671875, -0.00864410400390625, 0.049652099609375, -0.00\n"
     ]
    }
   ],
   "source": [
    "single_vector = embeddings.embed_query(text)\n",
    "print(str(single_vector)[:100])  # Show the first 100 characters of the vector"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1b5a7d03",
   "metadata": {},
   "source": [
    "### Embed multiple texts\n",
    "\n",
    "You can embed multiple texts with `embed_documents`:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2f4d6e97",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-0.04443359375, 0.01885986328125, 0.0180511474609375, -0.0086517333984375, 0.049652099609375, -0.00\n",
      "[-0.02032470703125, 0.02606201171875, 0.051605224609375, -0.0281982421875, 0.055755615234375, 0.0019\n"
     ]
    }
   ],
   "source": [
    "text2 = (\n",
    "    \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
    ")\n",
    "two_vectors = embeddings.embed_documents([text, text2])\n",
    "for vector in two_vectors:\n",
    "    print(str(vector)[:100])  # Show the first 100 characters of the vector"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "98785c12",
   "metadata": {},
   "source": [
    "## API Reference\n",
    "\n",
    "For detailed documentation on `MistralAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/mistralai/embeddings/langchain_mistralai.embeddings.MistralAIEmbeddings.html).\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
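`embed_documents` sends the whole list in one request, so very large corpora are usually chunked by the caller. A minimal sketch of that pattern, assuming the `embeddings` object from the notebook above; the batch size of 64 is an illustrative assumption, not a documented MistralAI limit:

```python
# Minimal sketch: embed a large corpus in fixed-size batches so a single
# oversized request doesn't hit provider payload limits.
def embed_in_batches(embedder, texts, batch_size=64):
    vectors = []
    for start in range(0, len(texts), batch_size):
        vectors.extend(embedder.embed_documents(texts[start : start + batch_size]))
    return vectors

corpus = [f"document {i}" for i in range(200)]
all_vectors = embed_in_batches(embeddings, corpus)
print(len(all_vectors))  # 200
```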
323 docs/docs/integrations/text_embedding/netmind.ipynb Normal file
@@ -0,0 +1,323 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Netmind\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9a3d6f34",
   "metadata": {},
   "source": [
    "# NetmindEmbeddings\n",
    "\n",
    "This will help you get started with Netmind embedding models using LangChain. For detailed documentation on `NetmindEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "| Provider | Package |\n",
    "|:--------:|:-------:|\n",
    "| [Netmind](/docs/integrations/providers/netmind/) | [langchain-netmind](https://python.langchain.com/api_reference/) |\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access Netmind embedding models you'll need to create a Netmind account, get an API key, and install the `langchain-netmind` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "Head to https://www.netmind.ai/ to sign up for Netmind and generate an API key. Once you've done this, set the `NETMIND_API_KEY` environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "id": "36521c2a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-20T01:53:29.982962Z",
     "start_time": "2025-03-20T01:53:27.764291Z"
    }
   },
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"NETMIND_API_KEY\"):\n",
    "    os.environ[\"NETMIND_API_KEY\"] = getpass.getpass(\"Enter your Netmind API key: \")"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "cell_type": "markdown",
   "id": "c84fb993",
   "metadata": {},
   "source": [
    "If you want to get automated tracing of your model calls, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
   ]
  },
  {
   "cell_type": "code",
   "id": "39a4953b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-20T01:53:32.143687Z",
     "start_time": "2025-03-20T01:53:32.141858Z"
    }
   },
   "source": [
    "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
    "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ],
   "outputs": [],
   "execution_count": 2
  },
  {
   "cell_type": "markdown",
   "id": "d9664366",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain Netmind integration lives in the `langchain-netmind` package:"
   ]
  },
  {
   "cell_type": "code",
   "id": "64853226",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-20T01:53:38.639440Z",
     "start_time": "2025-03-20T01:53:36.171640Z"
    }
   },
   "source": [
    "%pip install -qU langchain-netmind"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\r\n",
      "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m24.0\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m25.0.1\u001B[0m\r\n",
      "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\r\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "markdown",
   "id": "45dd1724",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object:\n"
   ]
  },
  {
   "cell_type": "code",
   "id": "9ea7a09b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-20T01:54:31.005334Z",
     "start_time": "2025-03-20T01:54:30.146876Z"
    }
   },
   "source": [
    "from langchain_netmind import NetmindEmbeddings\n",
    "\n",
    "embeddings = NetmindEmbeddings(\n",
    "    model=\"nvidia/NV-Embed-v2\",\n",
    ")"
   ],
   "outputs": [],
   "execution_count": 4
  },
  {
   "cell_type": "markdown",
   "id": "77d271b6",
   "metadata": {},
   "source": [
    "## Indexing and Retrieval\n",
    "\n",
    "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
    "\n",
    "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
   ]
  },
  {
   "cell_type": "code",
   "id": "d817716b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-20T01:54:40.963137Z",
     "start_time": "2025-03-20T01:54:34.500805Z"
    }
   },
   "source": [
    "# Create a vector store with a sample text\n",
    "from langchain_core.vectorstores import InMemoryVectorStore\n",
    "\n",
    "text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
    "\n",
    "vectorstore = InMemoryVectorStore.from_texts(\n",
    "    [text],\n",
    "    embedding=embeddings,\n",
    ")\n",
    "\n",
    "# Use the vectorstore as a retriever\n",
    "retriever = vectorstore.as_retriever()\n",
    "\n",
    "# Retrieve the most similar text\n",
    "retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
    "\n",
    "# Show the retrieved document's content\n",
    "retrieved_documents[0].page_content"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'LangChain is the framework for building context-aware reasoning applications'"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "cell_type": "markdown",
   "id": "e02b9855",
   "metadata": {},
   "source": [
    "## Direct Usage\n",
    "\n",
    "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
    "\n",
    "You can directly call these methods to get embeddings for your own use cases.\n",
    "\n",
    "### Embed single texts\n",
    "\n",
    "You can embed single texts or documents with `embed_query`:"
   ]
  },
  {
   "cell_type": "code",
   "id": "0d2befcd",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-20T01:54:49.540750Z",
     "start_time": "2025-03-20T01:54:45.196528Z"
    }
   },
   "source": [
    "single_vector = embeddings.embed_query(text)\n",
    "print(str(single_vector)[:100])  # Show the first 100 characters of the vector"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-0.0051240199245512486, -0.01726294495165348, 0.011966848745942116, -0.0018107350915670395, 0.01146\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "cell_type": "markdown",
   "id": "1b5a7d03",
   "metadata": {},
   "source": [
    "### Embed multiple texts\n",
    "\n",
    "You can embed multiple texts with `embed_documents`:"
   ]
  },
  {
   "cell_type": "code",
   "id": "2f4d6e97",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-20T01:54:57.089847Z",
     "start_time": "2025-03-20T01:54:52.468719Z"
    }
   },
   "source": [
    "text2 = (\n",
    "    \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
    ")\n",
    "two_vectors = embeddings.embed_documents([text, text2])\n",
    "for vector in two_vectors:\n",
    "    print(str(vector)[:100])  # Show the first 100 characters of the vector"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-0.0051240199245512486, -0.01726294495165348, 0.011966848745942116, -0.0018107350915670395, 0.01146\n",
      "[0.022523142397403717, -0.002223758026957512, -0.008578270673751831, -0.006029821466654539, 0.008752\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "cell_type": "markdown",
   "id": "98785c12",
   "metadata": {},
   "source": [
    "## API Reference\n",
    "\n",
    "For detailed documentation on `NetmindEmbeddings` features and configuration options, please refer to:\n",
    "* [API reference](https://python.langchain.com/api_reference/)\n",
    "* [langchain-netmind](https://github.com/protagolabs/langchain-netmind)\n",
    "* [pypi](https://pypi.org/project/langchain-netmind/)\n"
   ]
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "adb9e45c34733299"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
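The retriever wrapper above is a thin layer over the vector store; you can also query the store directly. A short sketch, assuming the `vectorstore` built in the notebook above:

```python
# Minimal sketch: query the vector store directly, skipping the retriever
# wrapper. similarity_search embeds the query with embed_query and returns
# the k nearest Documents.
results = vectorstore.similarity_search("What is LangChain?", k=1)
for doc in results:
    print(doc.page_content)
```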
@@ -1,287 +1,285 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: Nomic\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9a3d6f34",
   "metadata": {},
   "source": [
    "# NomicEmbeddings\n",
    "\n",
    "This will help you get started with Nomic embedding models using LangChain. For detailed documentation on `NomicEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/nomic/embeddings/langchain_nomic.embeddings.NomicEmbeddings.html).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "import { ItemTable } from \"@theme/FeatureTables\";\n",
    "\n",
    "<ItemTable category=\"text_embedding\" item=\"Nomic\" />\n",
    "\n",
    "## Setup\n",
    "\n",
    "To access Nomic embedding models you'll need to create a Nomic account, get an API key, and install the `langchain-nomic` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "Head to [https://atlas.nomic.ai/](https://atlas.nomic.ai/) to sign up for Nomic and generate an API key. Once you've done this, set the `NOMIC_API_KEY` environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "36521c2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"NOMIC_API_KEY\"):\n",
    "    os.environ[\"NOMIC_API_KEY\"] = getpass.getpass(\"Enter your Nomic API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c84fb993",
   "metadata": {},
   "source": [
    "If you want to get automated tracing of your model calls, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "39a4953b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9664366",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain Nomic integration lives in the `langchain-nomic` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "64853226",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install -qU langchain-nomic"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45dd1724",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate embeddings:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "9ea7a09b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_nomic import NomicEmbeddings\n",
    "\n",
    "embeddings = NomicEmbeddings(\n",
    "    model=\"nomic-embed-text-v1.5\",\n",
    "    # dimensionality=256,\n",
    "    # Nomic's `nomic-embed-text-v1.5` model was [trained with Matryoshka learning](https://blog.nomic.ai/posts/nomic-embed-matryoshka)\n",
    "    # to enable variable-length embeddings with a single model.\n",
    "    # This means that you can specify the dimensionality of the embeddings at inference time.\n",
|
||||
" # The model supports dimensionality from 64 to 768.\n",
|
||||
" # inference_mode=\"remote\",\n",
|
||||
" # One of `remote`, `local` (Embed4All), or `dynamic` (automatic). Defaults to `remote`.\n",
|
||||
" # api_key=... , # if using remote inference,\n",
|
||||
" # device=\"cpu\",\n",
|
||||
" # The device to use for local embeddings. Choices include\n",
|
||||
" # `cpu`, `gpu`, `nvidia`, `amd`, or a specific device name. See\n",
|
||||
" # the docstring for `GPT4All.__init__` for more info. Typically\n",
|
||||
" # defaults to CPU. Do not use on macOS.\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
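{
"cell_type": "markdown",
"id": "nomic-dim-note",
"metadata": {},
"source": [
"Because `nomic-embed-text-v1.5` was trained with Matryoshka learning, you can trade embedding size for quality at inference time via the `dimensionality` argument shown above. The cell below is a minimal sketch (not part of the original notebook): it assumes a valid `NOMIC_API_KEY` and introduces a hypothetical `small_embeddings` object to compare vector lengths."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "nomic-dim-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch (not in the original notebook).\n",
"# `small_embeddings` is a hypothetical name introduced here.\n",
"small_embeddings = NomicEmbeddings(\n",
" model=\"nomic-embed-text-v1.5\",\n",
" dimensionality=256,\n",
")\n",
"\n",
"full_vector = embeddings.embed_query(\"hello world\")\n",
"small_vector = small_embeddings.embed_query(\"hello world\")\n",
"# The second vector should have 256 entries; the first uses the model default.\n",
"print(len(full_vector), len(small_vector))"
]
},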
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Nomic\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.024642944, 0.029083252, -0.14013672, -0.09082031, 0.058898926, -0.07489014, -0.0138168335, 0.0037\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# NomicEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Nomic embedding models using LangChain. For detailed documentation on `NomicEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/nomic/embeddings/langchain_nomic.embeddings.NomicEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Nomic\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Nomic embedding models you'll need to create a/an Nomic account, get an API key, and install the `langchain-nomic` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [https://atlas.nomic.ai/](https://atlas.nomic.ai/) to sign up to Nomic and generate an API key. Once you've done this set the `NOMIC_API_KEY` environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.012771606, 0.023727417, -0.12365723, -0.083740234, 0.06530762, -0.07110596, -0.021896362, -0.0068\n",
|
||||
"[-0.019058228, 0.04058838, -0.15222168, -0.06842041, -0.012130737, -0.07128906, -0.04534912, 0.00522\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"NOMIC_API_KEY\"):\n",
|
||||
" os.environ[\"NOMIC_API_KEY\"] = getpass.getpass(\"Enter your Nomic API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Nomic integration lives in the `langchain-nomic` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-nomic"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_nomic import NomicEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = NomicEmbeddings(\n",
|
||||
" model=\"nomic-embed-text-v1.5\",\n",
|
||||
" # dimensionality=256,\n",
|
||||
" # Nomic's `nomic-embed-text-v1.5` model was [trained with Matryoshka learning](https://blog.nomic.ai/posts/nomic-embed-matryoshka)\n",
|
||||
" # to enable variable-length embeddings with a single model.\n",
|
||||
" # This means that you can specify the dimensionality of the embeddings at inference time.\n",
|
||||
" # The model supports dimensionality from 64 to 768.\n",
|
||||
" # inference_mode=\"remote\",\n",
|
||||
" # One of `remote`, `local` (Embed4All), or `dynamic` (automatic). Defaults to `remote`.\n",
|
||||
" # api_key=... , # if using remote inference,\n",
|
||||
" # device=\"cpu\",\n",
|
||||
" # The device to use for local embeddings. Choices include\n",
|
||||
" # `cpu`, `gpu`, `nvidia`, `amd`, or a specific device name. See\n",
|
||||
" # the docstring for `GPT4All.__init__` for more info. Typically\n",
|
||||
" # defaults to CPU. Do not use on macOS.\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.024642944, 0.029083252, -0.14013672, -0.09082031, 0.058898926, -0.07489014, -0.0138168335, 0.0037\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.012771606, 0.023727417, -0.12365723, -0.083740234, 0.06530762, -0.07110596, -0.021896362, -0.0068\n",
|
||||
"[-0.019058228, 0.04058838, -0.15222168, -0.06842041, -0.012130737, -0.07128906, -0.04534912, 0.00522\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
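{
"cell_type": "markdown",
"id": "nomic-cosine-note",
"metadata": {},
"source": [
"As a quick sanity check (a sketch added here, not part of the original notebook), you can compare the vectors that `embed_query` and `embed_documents` produce for the same text using cosine similarity. This assumes `numpy` is installed; some providers embed queries and documents with different task prefixes, so the score may be high without being exactly 1.0."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "nomic-cosine-sketch",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"query_vec = np.array(embeddings.embed_query(text))\n",
"doc_vec = np.array(embeddings.embed_documents([text])[0])\n",
"\n",
"# Cosine similarity between the two representations of the same text.\n",
"score = query_vec @ doc_vec / (np.linalg.norm(query_vec) * np.linalg.norm(doc_vec))\n",
"print(float(score))"
]
},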
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `NomicEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/nomic/embeddings/langchain_nomic.embeddings.NomicEmbeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,276 +1,274 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Ollama\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OllamaEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Ollama embedding models using LangChain. For detailed documentation on `OllamaEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Ollama\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
"\n",
|
||||
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
|
||||
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
|
||||
" * e.g., `ollama pull llama3`\n",
|
||||
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
|
||||
"\n",
|
||||
 On Mac, the models">
"> On Mac, the models will be downloaded to `~/.ollama/models`\n",
|
||||
"> \n",
|
||||
"> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n",
|
||||
"\n",
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"There is no built-in auth mechanism for Ollama."
|
||||
]
|
||||
},
|
||||
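{
"cell_type": "markdown",
"id": "ollama-pull-note",
"metadata": {},
"source": [
"The pull step from the list above can also be run from inside the notebook. This is a small sketch added here (not part of the original notebook); it assumes Ollama is installed and its server is running locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ollama-pull-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch (not in the original notebook): pull the model used below.\n",
"!ollama pull llama3"
]
},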
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Ollama integration lives in the `langchain-ollama` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate embeddings:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ollama import OllamaEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OllamaEmbeddings(\n",
|
||||
" model=\"llama3\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
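{
"cell_type": "markdown",
"id": "ollama-baseurl-note",
"metadata": {},
"source": [
"By default the client talks to a local Ollama server on `localhost:11434`. If your server runs elsewhere, you can point the client at it with the `base_url` parameter. The cell below is an illustrative sketch (not part of the original notebook); the host is a placeholder."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ollama-baseurl-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch: `http://my-ollama-host:11434` is a placeholder URL.\n",
"remote_embeddings = OllamaEmbeddings(\n",
" model=\"llama3\",\n",
" base_url=\"http://my-ollama-host:11434\",\n",
")"
]
},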
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Ollama\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.001288981, 0.006547121, 0.018376578, 0.025603496, 0.009599175, -0.0042578303, -0.023250086, -0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OllamaEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Ollama embedding models using LangChain. For detailed documentation on `OllamaEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Ollama\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
"\n",
|
||||
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
|
||||
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
|
||||
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
|
||||
" * e.g., `ollama pull llama3`\n",
|
||||
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
|
||||
"\n",
|
||||
 On Mac">
"> On Mac, the models will be downloaded to `~/.ollama/models`\n",
|
||||
">\n",
|
||||
"> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n",
|
||||
"\n",
|
||||
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
|
||||
"* To view all pulled models, use `ollama list`\n",
|
||||
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
|
||||
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"There is no built-in auth mechanism for Ollama."
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.0013138362, 0.006438795, 0.018304596, 0.025530428, 0.009717592, -0.004225636, -0.023363983, -0.0\n",
|
||||
"[-0.010317663, 0.01632489, 0.0070348927, 0.017076202, 0.008924255, 0.007399284, -0.023064945, -0.003\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Ollama integration lives in the `langchain-ollama` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate embeddings:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ollama import OllamaEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OllamaEmbeddings(\n",
|
||||
" model=\"llama3\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
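{
"cell_type": "markdown",
"id": "inmemory-scores-note",
"metadata": {},
"source": [
"If you also want the similarity scores, `InMemoryVectorStore` exposes `similarity_search_with_score`. The cell below is a small usage sketch (not part of the original notebook) that reuses the `vectorstore` built above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "inmemory-scores-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Retrieve documents together with their similarity scores.\n",
"results = vectorstore.similarity_search_with_score(\"What is LangChain?\", k=1)\n",
"for doc, score in results:\n",
" print(score, doc.page_content)"
]
},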
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.001288981, 0.006547121, 0.018376578, 0.025603496, 0.009599175, -0.0042578303, -0.023250086, -0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.0013138362, 0.006438795, 0.018304596, 0.025530428, 0.009717592, -0.004225636, -0.023363983, -0.0\n",
|
||||
"[-0.010317663, 0.01632489, 0.0070348927, 0.017076202, 0.008924255, 0.007399284, -0.023064945, -0.003\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `OllamaEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,272 +1,270 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: OpenAI\n",
|
||||
"keywords: [openaiembeddings]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with OpenAI embedding models using LangChain. For detailed documentation on `OpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/openai/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"OpenAI\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access OpenAI embedding models you'll need to create a/an OpenAI account, get an API key, and install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [platform.openai.com](https://platform.openai.com) to sign up to OpenAI and generate an API key. Once you’ve done this set the OPENAI_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain OpenAI integration lives in the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings(\n",
|
||||
" model=\"text-embedding-3-large\",\n",
|
||||
" # With the `text-embedding-3` class\n",
|
||||
" # of models, you can specify the size\n",
|
||||
" # of the embeddings you want returned.\n",
|
||||
" # dimensions=1024\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
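{
"cell_type": "markdown",
"id": "openai-dims-note",
"metadata": {},
"source": [
"With the `text-embedding-3` models you can shorten the returned vectors through the `dimensions` parameter noted in the comments above. The cell below is a minimal sketch (not part of the original notebook); `short_embeddings` is a hypothetical name introduced here."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "openai-dims-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch: request 1024-dimensional vectors.\n",
"short_embeddings = OpenAIEmbeddings(\n",
" model=\"text-embedding-3-large\",\n",
" dimensions=1024,\n",
")\n",
"\n",
"print(len(short_embeddings.embed_query(\"hello world\")))  # 1024"
]
},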
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: OpenAI\n",
|
||||
"keywords: [openaiembeddings]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.019276829436421394, 0.0037708976306021214, -0.03294256329536438, 0.0037671267054975033, 0.008175\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with OpenAI embedding models using LangChain. For detailed documentation on `OpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/openai/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"OpenAI\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access OpenAI embedding models you'll need to create a/an OpenAI account, get an API key, and install the `langchain-openai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [platform.openai.com](https://platform.openai.com) to sign up to OpenAI and generate an API key. Once you’ve done this set the OPENAI_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.019260549917817116, 0.0037612367887049913, -0.03291035071015358, 0.003757466096431017, 0.0082049\n",
|
||||
"[-0.010181212797760963, 0.023419594392180443, -0.04215526953339577, -0.001532090245746076, -0.023573\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain OpenAI integration lives in the `langchain-openai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings(\n",
|
||||
" model=\"text-embedding-3-large\",\n",
|
||||
" # With the `text-embedding-3` class\n",
|
||||
" # of models, you can specify the size\n",
|
||||
" # of the embeddings you want returned.\n",
|
||||
" # dimensions=1024\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.019276829436421394, 0.0037708976306021214, -0.03294256329536438, 0.0037671267054975033, 0.008175\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.019260549917817116, 0.0037612367887049913, -0.03291035071015358, 0.003757466096431017, 0.0082049\n",
|
||||
"[-0.010181212797760963, 0.023419594392180443, -0.04215526953339577, -0.001532090245746076, -0.023573\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `OpenAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/openai/embeddings/langchain_openai.embeddings.base.OpenAIEmbeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,277 +1,275 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Together AI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# TogetherEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with Together embedding models using LangChain. For detailed documentation on `TogetherEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/together/embeddings/langchain_together.embeddings.TogetherEmbeddings.html).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"import { ItemTable } from \"@theme/FeatureTables\";\n",
|
||||
"\n",
|
||||
"<ItemTable category=\"text_embedding\" item=\"Together\" />\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access Together embedding models you'll need to create a/an Together account, get an API key, and install the `langchain-together` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [https://api.together.xyz/](https://api.together.xyz/) to sign up to Together and generate an API key. Once you've done this set the TOGETHER_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"TOGETHER_API_KEY\"):\n",
|
||||
" os.environ[\"TOGETHER_API_KEY\"] = getpass.getpass(\"Enter your Together API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Together integration lives in the `langchain-together` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.2\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU langchain-together"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_together import TogetherEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = TogetherEmbeddings(\n",
|
||||
" model=\"togethercomputer/m2-bert-80M-8k-retrieval\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.3812227, -0.052848946, -0.10564975, 0.03480297, 0.2878488, 0.0084609175, 0.11605915, 0.05303011, \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[0.3812227, -0.052848946, -0.10564975, 0.03480297, 0.2878488, 0.0084609175, 0.11605915, 0.05303011, \n",
|
||||
"[0.066308185, -0.032866564, 0.115751594, 0.19082588, 0.14017, -0.26976448, -0.056340694, -0.26923394\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
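{
"cell_type": "markdown",
"id": "b1c2d3e4",
"metadata": {},
"source": [
"Because both texts were embedded into the same vector space, you can compare them directly. Below is a minimal illustrative sketch using cosine similarity; it assumes `numpy` is available, which is not otherwise required by this notebook:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c2d3e4f5",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"v1, v2 = (np.array(v) for v in two_vectors)\n",
"# Cosine similarity is the dot product of the L2-normalized vectors\n",
"similarity = float(v1 @ v2 / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n",
"print(similarity)"
]
},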
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `TogetherEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/together/embeddings/langchain_together.embeddings.TogetherEmbeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,279 +1,277 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: ZhipuAI\n",
|
||||
"keywords: [zhipuaiembeddings]\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a3d6f34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ZhipuAIEmbeddings\n",
|
||||
"\n",
|
||||
"This will help you get started with ZhipuAI embedding models using LangChain. For detailed documentation on `ZhipuAIEmbeddings` features and configuration options, please refer to the [API reference](https://bigmodel.cn/dev/api#vector).\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Provider | Package |\n",
|
||||
"|:--------:|:-------:|\n",
|
||||
"| [ZhipuAI](/docs/integrations/providers/zhipuai/) | [langchain-community](https://python.langchain.com/api_reference/community/embeddings/langchain_community.embeddings.zhipuai.ZhipuAIEmbeddings.html) |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access ZhipuAI embedding models you'll need to create a/an ZhipuAI account, get an API key, and install the `zhipuai` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Head to [https://bigmodel.cn/](https://bigmodel.cn/usercenter/apikeys) to sign up to ZhipuAI and generate an API key. Once you've done this set the ZHIPUAI_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "36521c2a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"ZHIPUAI_API_KEY\"):\n",
|
||||
" os.environ[\"ZHIPUAI_API_KEY\"] = getpass.getpass(\"Enter your ZhipuAI API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c84fb993",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "39a4953b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9664366",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain ZhipuAI integration lives in the `zhipuai` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "64853226",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install -qU zhipuai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45dd1724",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "9ea7a09b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import ZhipuAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = ZhipuAIEmbeddings(\n",
|
||||
" model=\"embedding-3\",\n",
|
||||
" # With the `embedding-3` class\n",
|
||||
" # of models, you can specify the size\n",
|
||||
" # of the embeddings you want returned.\n",
|
||||
" # dimensions=1024\n",
|
||||
")"
|
||||
]
|
||||
},
|
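{
"cell_type": "markdown",
"id": "4f2a1c3e",
"metadata": {},
"source": [
"As a quick sanity check, you can confirm the dimensionality of the returned vectors. This is a minimal sketch, assuming the optional `dimensions` argument mentioned in the comments above is supported by the `embedding-3` model:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5e3b2d4f",
"metadata": {},
"outputs": [],
"source": [
"# Request 1024-dimensional vectors and confirm the length (assumes\n",
"# `dimensions` is accepted by the embedding-3 class of models).\n",
"sized_embeddings = ZhipuAIEmbeddings(model=\"embedding-3\", dimensions=1024)\n",
"print(len(sized_embeddings.embed_query(\"hello world\")))  # expect 1024\n"
]
},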
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77d271b6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing and Retrieval\n",
|
||||
"\n",
|
||||
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
|
||||
"\n",
|
||||
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "d817716b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'LangChain is the framework for building context-aware reasoning applications'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create a vector store with a sample text\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"\n",
|
||||
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_texts(\n",
|
||||
" [text],\n",
|
||||
" embedding=embeddings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Use the vectorstore as a retriever\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"# Retrieve the most similar text\n",
|
||||
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
|
||||
"\n",
|
||||
"# show the retrieved document's content\n",
|
||||
"retrieved_documents[0].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e02b9855",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Direct Usage\n",
|
||||
"\n",
|
||||
"Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
|
||||
"\n",
|
||||
"You can directly call these methods to get embeddings for your own use cases.\n",
|
||||
"\n",
|
||||
"### Embed single texts\n",
|
||||
"\n",
|
||||
"You can embed single texts or documents with `embed_query`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0d2befcd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.022979736, 0.007785797, 0.04598999, 0.012741089, -0.01689148, 0.008277893, 0.016464233, 0.009246\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"single_vector = embeddings.embed_query(text)\n",
|
||||
"print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b5a7d03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Embed multiple texts\n",
|
||||
"\n",
|
||||
"You can embed multiple texts with `embed_documents`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2f4d6e97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.022979736, 0.007785797, 0.04598999, 0.012741089, -0.01689148, 0.008277893, 0.016464233, 0.009246\n",
|
||||
"[-0.02330017, -0.013916016, 0.00022411346, 0.017196655, -0.034240723, 0.011131287, 0.011497498, -0.0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text2 = (\n",
|
||||
" \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
|
||||
")\n",
|
||||
"two_vectors = embeddings.embed_documents([text, text2])\n",
|
||||
"for vector in two_vectors:\n",
|
||||
" print(str(vector)[:100]) # Show the first 100 characters of the vector"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "98785c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API Reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `ZhipuAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/community/embeddings/langchain_community.embeddings.zhipuai.ZhipuAIEmbeddings.html).\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
"Gathers all schema information for the connected database or a specific schema. Critical for the agent when determining actions. \n",
|
||||
"\n",
|
||||
"### `cassandra_db_select_table_data`\n",
|
||||
"Selects data from a specific keyspace and table. The agent can pass parameters for a predicate and limits on the number of returned records. \n",
|
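"\n",
"A minimal, hypothetical sketch of calling this tool directly. The lookup by tool name follows the standard LangChain toolkit pattern of `get_tools()`, assuming a `toolkit` object like the ones used in the other toolkit guides; the argument names (`keyspace`, `table`, `predicate`, `limit`) are assumptions rather than the toolkit's documented schema:\n",
"\n",
"```python\n",
"tools = {tool.name: tool for tool in toolkit.get_tools()}\n",
"# Hypothetical invocation; adjust argument names to the toolkit's actual schema.\n",
"rows = tools[\"cassandra_db_select_table_data\"].invoke(\n",
"    {\"keyspace\": \"my_keyspace\", \"table\": \"users\", \"predicate\": \"age > 30\", \"limit\": 5}\n",
")\n",
"print(rows)\n",
"```\n",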
||||
"\n",
|
||||
"### `cassandra_db_query`\n",
|
||||
"Expiriemental alternative to `cassandra_db_select_table_data` which takes a query string completely formed by the agent instead of parameters. *Warning*: This can lead to unusual queries that may not be as performant(or even work). This may be removed in future releases. If it does something cool, we want to know about that too. You never know!"
|
||||
|
||||
@@ -1,332 +1,330 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: CDP\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# CDP Agentkit Toolkit\n",
|
||||
"\n",
|
||||
"The `CDP Agentkit` toolkit contains tools that enable an LLM agent to interact with the [Coinbase Developer Platform](https://docs.cdp.coinbase.com/). The toolkit provides a wrapper around the CDP SDK, allowing agents to perform onchain operations like transfers, trades, and smart contract interactions.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Serializable | JS support | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: |\n",
|
||||
"| CdpToolkit | `cdp-langchain` | ❌ | ❌ |  |\n",
|
||||
"\n",
|
||||
"### Tool features\n",
|
||||
"\n",
|
||||
"The toolkit provides the following tools:\n",
|
||||
"\n",
|
||||
"1. **get_wallet_details** - Get details about the MPC Wallet\n",
|
||||
"2. **get_balance** - Get balance for specific assets\n",
|
||||
"3. **request_faucet_funds** - Request test tokens from faucet\n",
|
||||
"4. **transfer** - Transfer assets between addresses\n",
|
||||
"5. **trade** - Trade assets (Mainnet only)\n",
|
||||
"6. **deploy_token** - Deploy ERC-20 token contracts\n",
|
||||
"7. **mint_nft** - Mint NFTs from existing contracts\n",
|
||||
"8. **deploy_nft** - Deploy new NFT contracts\n",
|
||||
"9. **register_basename** - Register a basename for the wallet\n",
|
||||
"\n",
|
||||
"We encourage you to add your own tools, both using CDP and web2 APIs, to create an agent that is tailored to your needs.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"At a high-level, we will:\n",
|
||||
"\n",
|
||||
"1. Install the langchain package\n",
|
||||
"2. Set up your CDP API credentials\n",
|
||||
"3. Initialize the CDP wrapper and toolkit\n",
|
||||
"4. Pass the tools to your agent with `toolkit.get_tools()`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of individual tools, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This toolkit lives in the `cdp-langchain` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU cdp-langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Set Environment Variables\n",
|
||||
"\n",
|
||||
"To use this toolkit, you must first set the following environment variables to access the [CDP APIs](https://docs.cdp.coinbase.com/mpc-wallet/docs/quickstart) to create wallets and interact onchain. You can sign up for an API key for free on the [CDP Portal](https://cdp.coinbase.com/):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cb09c344",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"for env_var in [\n",
|
||||
" \"CDP_API_KEY_NAME\",\n",
|
||||
" \"CDP_API_KEY_PRIVATE_KEY\",\n",
|
||||
"]:\n",
|
||||
" if not os.getenv(env_var):\n",
|
||||
" os.environ[env_var] = getpass.getpass(f\"Enter your {env_var}: \")\n",
|
||||
"\n",
|
||||
"# Optional: Set network (defaults to base-sepolia)\n",
|
||||
"os.environ[\"NETWORK_ID\"] = \"base-sepolia\" # or \"base-mainnet\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5c5f2839",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our toolkit:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "51a60dbe",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from cdp_langchain.agent_toolkits import CdpToolkit\n",
|
||||
"from cdp_langchain.utils import CdpAgentkitWrapper\n",
|
||||
"\n",
|
||||
"# Initialize CDP wrapper\n",
|
||||
"cdp = CdpAgentkitWrapper()\n",
|
||||
"\n",
|
||||
"# Create toolkit from wrapper\n",
|
||||
"toolkit = CdpToolkit.from_cdp_agentkit_wrapper(cdp)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d11245ad",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"View [available tools](#tool-features):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "310bf18e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"for tool in tools:\n",
|
||||
" print(tool.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "23e11cc9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d1ee55bc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize the agent with the tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f8a2c4b1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4a7c9d2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Example usage:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c9a8e4f3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"example_query = \"Send 0.005 ETH to john2879.base.eth\"\n",
|
||||
"\n",
|
||||
"events = agent_executor.stream(\n",
|
||||
" {\"messages\": [(\"user\", example_query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
"for event in events:\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e5a7c9d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Expected output:\n",
|
||||
"```\n",
|
||||
"Transferred 0.005 of eth to john2879.base.eth.\n",
|
||||
"Transaction hash for the transfer: 0x78c7c2878659a0de216d0764fc87eff0d38b47f3315fa02ba493a83d8e782d1e\n",
|
||||
"Transaction link for the transfer: https://sepolia.basescan.org/tx/0x78c7c2878659a0de216d0764fc87eff0d38b47f3315fa02ba493a83d8e782d1\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f5a7c9d5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## CDP Toolkit Specific Features\n",
|
||||
"\n",
|
||||
"### Wallet Management\n",
|
||||
"\n",
|
||||
"The toolkit maintains an MPC wallet. The wallet data can be exported and imported to persist between sessions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "g5a7c9d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Export wallet data\n",
|
||||
"wallet_data = cdp.export_wallet()\n",
|
||||
"\n",
|
||||
"# Import wallet data\n",
|
||||
"values = {\"cdp_wallet_data\": wallet_data}\n",
|
||||
"cdp = CdpAgentkitWrapper(**values)"
|
||||
]
|
||||
},
|
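{
"cell_type": "markdown",
"id": "g6a7c9d9",
"metadata": {},
"source": [
"To actually persist the wallet across sessions, you can write the exported data to disk and reload it on startup. A minimal sketch, assuming `export_wallet()` returns a string as used above; the file name is hypothetical:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "g7a7c9e0",
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"\n",
"wallet_file = Path(\"wallet_data.txt\")  # hypothetical location; keep this file secure\n",
"\n",
"# Save on shutdown\n",
"wallet_file.write_text(cdp.export_wallet())\n",
"\n",
"# Restore on startup\n",
"if wallet_file.exists():\n",
"    cdp = CdpAgentkitWrapper(cdp_wallet_data=wallet_file.read_text())\n"
]
},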
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "h5a7c9d7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Network Support\n",
|
||||
"\n",
|
||||
"The toolkit supports [multiple networks](https://docs.cdp.coinbase.com/cdp-sdk/docs/networks)\n",
|
||||
"\n",
|
||||
"### Gasless Transactions\n",
|
||||
"\n",
|
||||
"Some operations support gasless transactions on Base Mainnet:\n",
|
||||
"- USDC transfers\n",
|
||||
"- EURC transfers\n",
|
||||
"- cbBTC transfers"
|
||||
]
|
||||
},
|
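{
"cell_type": "markdown",
"id": "j5a7c9d9",
"metadata": {},
"source": [
"For example, a gasless USDC transfer can be requested through the same agent loop used above. This is a sketch rather than verified CDP behavior: it assumes the agent routes the request to the `transfer` tool, that `NETWORK_ID` is set to `base-mainnet`, and that the wallet holds USDC:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "k5a7c9e0",
"metadata": {},
"outputs": [],
"source": [
"# Assumes NETWORK_ID=\"base-mainnet\" and a USDC balance in the agent's wallet.\n",
"gasless_query = \"Send 1 USDC to john2879.base.eth gaslessly\"\n",
"\n",
"events = agent_executor.stream(\n",
"    {\"messages\": [(\"user\", gasless_query)]},\n",
"    stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
"    event[\"messages\"][-1].pretty_print()\n"
]
},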
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "i5a7c9d8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all CDP features and configurations head to the [CDP docs](https://docs.cdp.coinbase.com/mpc-wallet/docs/welcome)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,360 +1,358 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Github Toolkit\n",
|
||||
"\n",
|
||||
"The `Github` toolkit contains tools that enable an LLM agent to interact with a github repository. \n",
|
||||
"The tool is a wrapper for the [PyGitHub](https://github.com/PyGithub/PyGithub) library. \n",
|
||||
"\n",
|
||||
"For detailed documentation of all GithubToolkit features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.github.toolkit.GitHubToolkit.html).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"At a high-level, we will:\n",
|
||||
"\n",
|
||||
"1. Install the pygithub library\n",
|
||||
"2. Create a Github app\n",
|
||||
"3. Set your environmental variables\n",
|
||||
"4. Pass the tools to your agent with `toolkit.get_tools()`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"#### 1. Install dependencies\n",
|
||||
"\n",
|
||||
"This integration is implemented in `langchain-community`. We will also need the `pygithub` dependency:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet pygithub langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2. Create a Github App\n",
|
||||
"\n",
|
||||
"[Follow the instructions here](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app) to create and register a Github app. Make sure your app has the following [repository permissions:](https://docs.github.com/en/rest/overview/permissions-required-for-github-apps?apiVersion=2022-11-28)\n",
|
||||
"\n",
|
||||
"* Commit statuses (read only)\n",
|
||||
"* Contents (read and write)\n",
|
||||
"* Issues (read and write)\n",
|
||||
"* Metadata (read only)\n",
|
||||
"* Pull requests (read and write)\n",
|
||||
"\n",
|
||||
"Once the app has been registered, you must give your app permission to access each of the repositories you whish it to act upon. Use the App settings on [github.com here](https://github.com/settings/installations).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#### 3. Set Environment Variables\n",
|
||||
"\n",
|
||||
"Before initializing your agent, the following environment variables need to be set:\n",
|
||||
"\n",
|
||||
"* **GITHUB_APP_ID**- A six digit number found in your app's general settings\n",
|
||||
"* **GITHUB_APP_PRIVATE_KEY**- The location of your app's private key .pem file, or the full text of that file as a string.\n",
|
||||
"* **GITHUB_REPOSITORY**- The name of the Github repository you want your bot to act upon. Must follow the format \\{username\\}/\\{repo-name\\}. *Make sure the app has been added to this repository first!*\n",
|
||||
"* Optional: **GITHUB_BRANCH**- The branch where the bot will make its commits. Defaults to `repo.default_branch`.\n",
|
||||
"* Optional: **GITHUB_BASE_BRANCH**- The base branch of your repo upon which PRs will based from. Defaults to `repo.default_branch`."
|
||||
]
|
||||
},
|
||||
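   {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For example, a minimal sketch of setting the optional branch variables explicitly (the values below are placeholders, not real branch names):\n",
    "\n",
    "```python\n",
    "import os\n",
    "\n",
    "# Hypothetical values for illustration only.\n",
    "os.environ[\"GITHUB_BRANCH\"] = \"bot-branch\"\n",
    "os.environ[\"GITHUB_BASE_BRANCH\"] = \"main\"\n",
    "```"
   ]
  },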
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
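    "# Prompt for any required GitHub credentials that are not already set:\n",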
"for env_var in [\n",
|
||||
" \"GITHUB_APP_ID\",\n",
|
||||
" \"GITHUB_APP_PRIVATE_KEY\",\n",
|
||||
" \"GITHUB_REPOSITORY\",\n",
|
||||
"]:\n",
|
||||
" if not os.getenv(env_var):\n",
|
||||
" os.environ[env_var] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our toolkit:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit\n",
|
||||
"from langchain_community.utilities.github import GitHubAPIWrapper\n",
|
||||
"\n",
|
||||
"github = GitHubAPIWrapper()\n",
|
||||
"toolkit = GitHubToolkit.from_github_api_wrapper(github)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"View available tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Get Issues\n",
|
||||
"Get Issue\n",
|
||||
"Comment on Issue\n",
|
||||
"List open pull requests (PRs)\n",
|
||||
"Get Pull Request\n",
|
||||
"Overview of files included in PR\n",
|
||||
"Create Pull Request\n",
|
||||
"List Pull Requests' Files\n",
|
||||
"Create File\n",
|
||||
"Read File\n",
|
||||
"Update File\n",
|
||||
"Delete File\n",
|
||||
"Overview of existing files in Main branch\n",
|
||||
"Overview of files in current working branch\n",
|
||||
"List branches in this repository\n",
|
||||
"Set active branch\n",
|
||||
"Create a new branch\n",
|
||||
"Get files from a directory\n",
|
||||
"Search issues and pull requests\n",
|
||||
"Search code\n",
|
||||
"Create review request\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"for tool in tools:\n",
|
||||
" print(tool.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The purpose of these tools is as follows:\n",
|
||||
"\n",
|
||||
"Each of these steps will be explained in great detail below.\n",
|
||||
"\n",
|
||||
"1. **Get Issues**- fetches issues from the repository.\n",
|
||||
"\n",
|
||||
"2. **Get Issue**- fetches details about a specific issue.\n",
|
||||
"\n",
|
||||
"3. **Comment on Issue**- posts a comment on a specific issue.\n",
|
||||
"\n",
|
||||
"4. **Create Pull Request**- creates a pull request from the bot's working branch to the base branch.\n",
|
||||
"\n",
|
||||
"5. **Create File**- creates a new file in the repository.\n",
|
||||
"\n",
|
||||
"6. **Read File**- reads a file from the repository.\n",
|
||||
"\n",
|
||||
"7. **Update File**- updates a file in the repository.\n",
|
||||
"\n",
|
||||
"8. **Delete File**- deletes a file from the repository."
|
||||
]
|
||||
},
|
||||
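  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a rough sketch (the exact input schema is an assumption; most of these tools accept a single string argument), a tool can also be invoked directly, without an agent:\n",
    "\n",
    "```python\n",
    "# Look up a tool by its display name and call it directly.\n",
    "# \"24888\" is a hypothetical issue number, used purely for illustration.\n",
    "get_issue = next(t for t in toolkit.get_tools() if t.name == \"Get Issue\")\n",
    "print(get_issue.invoke(\"24888\"))\n",
    "```"
   ]
  },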
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Include release tools\n",
|
||||
"\n",
|
||||
"By default, the toolkit does not include release-related tools. You can include them by setting `include_release_tools=True` when initializing the toolkit:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"toolkit = GitHubToolkit.from_github_api_wrapper(github, include_release_tools=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Settings `include_release_tools=True` will include the following tools:\n",
|
||||
"\n",
|
||||
"* **Get Latest Release**- fetches the latest release from the repository.\n",
|
||||
"\n",
|
||||
"* **Get Releases**- fetches the latest 5 releases from the repository.\n",
|
||||
"\n",
|
||||
"* **Get Release**- fetches a specific release from the repository by tag name, e.g. `v1.0.0`.\n"
|
||||
]
|
||||
},
|
||||
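  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check that the extra tools are present (a sketch; the display names are taken from the list above):\n",
    "\n",
    "```python\n",
    "release_names = [t.name for t in toolkit.get_tools()]\n",
    "assert \"Get Latest Release\" in release_names\n",
    "```"
   ]
  },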
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize the agent with a subset of tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"tools = [tool for tool in toolkit.get_tools() if tool.name == \"Get Issue\"]\n",
|
||||
"assert len(tools) == 1\n",
|
||||
"tools[0].name = \"get_issue\"\n",
|
||||
"\n",
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And issue it a query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Github Toolkit\n",
|
||||
"\n",
|
||||
"The `Github` toolkit contains tools that enable an LLM agent to interact with a github repository. \n",
|
||||
"The tool is a wrapper for the [PyGitHub](https://github.com/PyGithub/PyGithub) library. \n",
|
||||
"\n",
|
||||
"For detailed documentation of all GithubToolkit features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.github.toolkit.GitHubToolkit.html).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"At a high-level, we will:\n",
|
||||
"\n",
|
||||
"1. Install the pygithub library\n",
|
||||
"2. Create a Github app\n",
|
||||
"3. Set your environmental variables\n",
|
||||
"4. Pass the tools to your agent with `toolkit.get_tools()`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What is the title of issue 24888?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" get_issue (call_iSYJVaM7uchfNHOMJoVPQsOi)\n",
|
||||
" Call ID: call_iSYJVaM7uchfNHOMJoVPQsOi\n",
|
||||
" Args:\n",
|
||||
" issue_number: 24888\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: get_issue\n",
|
||||
"\n",
|
||||
"{\"number\": 24888, \"title\": \"Standardize KV-Store Docs\", \"body\": \"To make our KV-store integrations as easy to use as possible we need to make sure the docs for them are thorough and standardized. There are two parts to this: updating the KV-store docstrings and updating the actual integration docs.\\r\\n\\r\\nThis needs to be done for each KV-store integration, ideally with one PR per KV-store.\\r\\n\\r\\nRelated to broader issues #21983 and #22005.\\r\\n\\r\\n## Docstrings\\r\\nEach KV-store class docstring should have the sections shown in the [Appendix](#appendix) below. The sections should have input and output code blocks when relevant.\\r\\n\\r\\nTo build a preview of the API docs for the package you're working on run (from root of repo):\\r\\n\\r\\n```shell\\r\\nmake api_docs_clean; make api_docs_quick_preview API_PKG=openai\\r\\n```\\r\\n\\r\\nwhere `API_PKG=` should be the parent directory that houses the edited package (e.g. community, openai, anthropic, huggingface, together, mistralai, groq, fireworks, etc.). This should be quite fast for all the partner packages.\\r\\n\\r\\n## Doc pages\\r\\nEach KV-store [docs page](https://python.langchain.com/docs/integrations/stores/) should follow [this template](https://github.com/langchain-ai/langchain/blob/master/libs/cli/langchain_cli/integration_template/docs/kv_store.ipynb).\\r\\n\\r\\nHere is an example: https://python.langchain.com/docs/integrations/stores/in_memory/\\r\\n\\r\\nYou can use the `langchain-cli` to quickly get started with a new chat model integration docs page (run from root of repo):\\r\\n\\r\\n```shell\\r\\npoetry run pip install -e libs/cli\\r\\npoetry run langchain-cli integration create-doc --name \\\"foo-bar\\\" --name-class FooBar --component-type kv_store --destination-dir ./docs/docs/integrations/stores/\\r\\n```\\r\\n\\r\\nwhere `--name` is the integration package name without the \\\"langchain-\\\" prefix and `--name-class` is the class name without the \\\"ByteStore\\\" suffix. This will create a template doc with some autopopulated fields at docs/docs/integrations/stores/foo_bar.ipynb.\\r\\n\\r\\nTo build a preview of the docs you can run (from root):\\r\\n\\r\\n```shell\\r\\nmake docs_clean\\r\\nmake docs_build\\r\\ncd docs/build/output-new\\r\\nyarn\\r\\nyarn start\\r\\n```\\r\\n\\r\\n## Appendix\\r\\nExpected sections for the KV-store class docstring.\\r\\n\\r\\n```python\\r\\n \\\"\\\"\\\"__ModuleName__ completion KV-store integration.\\r\\n\\r\\n # TODO: Replace with relevant packages, env vars.\\r\\n Setup:\\r\\n Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``.\\r\\n\\r\\n .. code-block:: bash\\r\\n\\r\\n pip install -U __package_name__\\r\\n export __MODULE_NAME___API_KEY=\\\"your-api-key\\\"\\r\\n\\r\\n # TODO: Populate with relevant params.\\r\\n Key init args \\u2014 client params:\\r\\n api_key: Optional[str]\\r\\n __ModuleName__ API key. If not passed in will be read from env var __MODULE_NAME___API_KEY.\\r\\n\\r\\n See full list of supported init args and their descriptions in the params section.\\r\\n\\r\\n # TODO: Replace with relevant init params.\\r\\n Instantiate:\\r\\n .. code-block:: python\\r\\n\\r\\n from __module_name__ import __ModuleName__ByteStore\\r\\n\\r\\n kv_store = __ModuleName__ByteStore(\\r\\n # api_key=\\\"...\\\",\\r\\n # other params...\\r\\n )\\r\\n\\r\\n Set keys:\\r\\n .. 
code-block:: python\\r\\n\\r\\n kv_pairs = [\\r\\n [\\\"key1\\\", \\\"value1\\\"],\\r\\n [\\\"key2\\\", \\\"value2\\\"],\\r\\n ]\\r\\n\\r\\n kv_store.mset(kv_pairs)\\r\\n\\r\\n .. code-block:: python\\r\\n\\r\\n Get keys:\\r\\n .. code-block:: python\\r\\n\\r\\n kv_store.mget([\\\"key1\\\", \\\"key2\\\"])\\r\\n\\r\\n .. code-block:: python\\r\\n\\r\\n # TODO: Example output.\\r\\n\\r\\n Delete keys:\\r\\n ..code-block:: python\\r\\n\\r\\n kv_store.mdelete([\\\"key1\\\", \\\"key2\\\"])\\r\\n\\r\\n ..code-block:: python\\r\\n \\\"\\\"\\\" # noqa: E501\\r\\n```\", \"comments\": \"[]\", \"opened_by\": \"jacoblee93\"}\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"The title of issue 24888 is \"Standardize KV-Store Docs\".\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of individual tools, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"#### 1. Install dependencies\n",
|
||||
"\n",
|
||||
"This integration is implemented in `langchain-community`. We will also need the `pygithub` dependency:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet pygithub langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### 2. Create a Github App\n",
|
||||
"\n",
|
||||
"[Follow the instructions here](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app) to create and register a Github app. Make sure your app has the following [repository permissions:](https://docs.github.com/en/rest/overview/permissions-required-for-github-apps?apiVersion=2022-11-28)\n",
|
||||
"\n",
|
||||
"* Commit statuses (read only)\n",
|
||||
"* Contents (read and write)\n",
|
||||
"* Issues (read and write)\n",
|
||||
"* Metadata (read only)\n",
|
||||
"* Pull requests (read and write)\n",
|
||||
"\n",
|
||||
"Once the app has been registered, you must give your app permission to access each of the repositories you whish it to act upon. Use the App settings on [github.com here](https://github.com/settings/installations).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#### 3. Set Environment Variables\n",
|
||||
"\n",
|
||||
"Before initializing your agent, the following environment variables need to be set:\n",
|
||||
"\n",
|
||||
"* **GITHUB_APP_ID**- A six digit number found in your app's general settings\n",
|
||||
"* **GITHUB_APP_PRIVATE_KEY**- The location of your app's private key .pem file, or the full text of that file as a string.\n",
|
||||
"* **GITHUB_REPOSITORY**- The name of the Github repository you want your bot to act upon. Must follow the format \\{username\\}/\\{repo-name\\}. *Make sure the app has been added to this repository first!*\n",
|
||||
"* Optional: **GITHUB_BRANCH**- The branch where the bot will make its commits. Defaults to `repo.default_branch`.\n",
|
||||
"* Optional: **GITHUB_BASE_BRANCH**- The base branch of your repo upon which PRs will based from. Defaults to `repo.default_branch`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"for env_var in [\n",
|
||||
" \"GITHUB_APP_ID\",\n",
|
||||
" \"GITHUB_APP_PRIVATE_KEY\",\n",
|
||||
" \"GITHUB_REPOSITORY\",\n",
|
||||
"]:\n",
|
||||
" if not os.getenv(env_var):\n",
|
||||
" os.environ[env_var] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our toolkit:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit\n",
|
||||
"from langchain_community.utilities.github import GitHubAPIWrapper\n",
|
||||
"\n",
|
||||
"github = GitHubAPIWrapper()\n",
|
||||
"toolkit = GitHubToolkit.from_github_api_wrapper(github)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"View available tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Get Issues\n",
|
||||
"Get Issue\n",
|
||||
"Comment on Issue\n",
|
||||
"List open pull requests (PRs)\n",
|
||||
"Get Pull Request\n",
|
||||
"Overview of files included in PR\n",
|
||||
"Create Pull Request\n",
|
||||
"List Pull Requests' Files\n",
|
||||
"Create File\n",
|
||||
"Read File\n",
|
||||
"Update File\n",
|
||||
"Delete File\n",
|
||||
"Overview of existing files in Main branch\n",
|
||||
"Overview of files in current working branch\n",
|
||||
"List branches in this repository\n",
|
||||
"Set active branch\n",
|
||||
"Create a new branch\n",
|
||||
"Get files from a directory\n",
|
||||
"Search issues and pull requests\n",
|
||||
"Search code\n",
|
||||
"Create review request\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"for tool in tools:\n",
|
||||
" print(tool.name)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The purpose of these tools is as follows:\n",
|
||||
"\n",
|
||||
"Each of these steps will be explained in great detail below.\n",
|
||||
"\n",
|
||||
"1. **Get Issues**- fetches issues from the repository.\n",
|
||||
"\n",
|
||||
"2. **Get Issue**- fetches details about a specific issue.\n",
|
||||
"\n",
|
||||
"3. **Comment on Issue**- posts a comment on a specific issue.\n",
|
||||
"\n",
|
||||
"4. **Create Pull Request**- creates a pull request from the bot's working branch to the base branch.\n",
|
||||
"\n",
|
||||
"5. **Create File**- creates a new file in the repository.\n",
|
||||
"\n",
|
||||
"6. **Read File**- reads a file from the repository.\n",
|
||||
"\n",
|
||||
"7. **Update File**- updates a file in the repository.\n",
|
||||
"\n",
|
||||
"8. **Delete File**- deletes a file from the repository."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Include release tools\n",
|
||||
"\n",
|
||||
"By default, the toolkit does not include release-related tools. You can include them by setting `include_release_tools=True` when initializing the toolkit:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"toolkit = GitHubToolkit.from_github_api_wrapper(github, include_release_tools=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Settings `include_release_tools=True` will include the following tools:\n",
|
||||
"\n",
|
||||
"* **Get Latest Release**- fetches the latest release from the repository.\n",
|
||||
"\n",
|
||||
"* **Get Releases**- fetches the latest 5 releases from the repository.\n",
|
||||
"\n",
|
||||
"* **Get Release**- fetches a specific release from the repository by tag name, e.g. `v1.0.0`.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Initialize the agent with a subset of tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"tools = [tool for tool in toolkit.get_tools() if tool.name == \"Get Issue\"]\n",
|
||||
"assert len(tools) == 1\n",
|
||||
"tools[0].name = \"get_issue\"\n",
|
||||
"\n",
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And issue it a query:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What is the title of issue 24888?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" get_issue (call_iSYJVaM7uchfNHOMJoVPQsOi)\n",
|
||||
" Call ID: call_iSYJVaM7uchfNHOMJoVPQsOi\n",
|
||||
" Args:\n",
|
||||
" issue_number: 24888\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: get_issue\n",
|
||||
"\n",
|
||||
"{\"number\": 24888, \"title\": \"Standardize KV-Store Docs\", \"body\": \"To make our KV-store integrations as easy to use as possible we need to make sure the docs for them are thorough and standardized. There are two parts to this: updating the KV-store docstrings and updating the actual integration docs.\\r\\n\\r\\nThis needs to be done for each KV-store integration, ideally with one PR per KV-store.\\r\\n\\r\\nRelated to broader issues #21983 and #22005.\\r\\n\\r\\n## Docstrings\\r\\nEach KV-store class docstring should have the sections shown in the [Appendix](#appendix) below. The sections should have input and output code blocks when relevant.\\r\\n\\r\\nTo build a preview of the API docs for the package you're working on run (from root of repo):\\r\\n\\r\\n```shell\\r\\nmake api_docs_clean; make api_docs_quick_preview API_PKG=openai\\r\\n```\\r\\n\\r\\nwhere `API_PKG=` should be the parent directory that houses the edited package (e.g. community, openai, anthropic, huggingface, together, mistralai, groq, fireworks, etc.). This should be quite fast for all the partner packages.\\r\\n\\r\\n## Doc pages\\r\\nEach KV-store [docs page](https://python.langchain.com/docs/integrations/stores/) should follow [this template](https://github.com/langchain-ai/langchain/blob/master/libs/cli/langchain_cli/integration_template/docs/kv_store.ipynb).\\r\\n\\r\\nHere is an example: https://python.langchain.com/docs/integrations/stores/in_memory/\\r\\n\\r\\nYou can use the `langchain-cli` to quickly get started with a new chat model integration docs page (run from root of repo):\\r\\n\\r\\n```shell\\r\\npoetry run pip install -e libs/cli\\r\\npoetry run langchain-cli integration create-doc --name \\\"foo-bar\\\" --name-class FooBar --component-type kv_store --destination-dir ./docs/docs/integrations/stores/\\r\\n```\\r\\n\\r\\nwhere `--name` is the integration package name without the \\\"langchain-\\\" prefix and `--name-class` is the class name without the \\\"ByteStore\\\" suffix. This will create a template doc with some autopopulated fields at docs/docs/integrations/stores/foo_bar.ipynb.\\r\\n\\r\\nTo build a preview of the docs you can run (from root):\\r\\n\\r\\n```shell\\r\\nmake docs_clean\\r\\nmake docs_build\\r\\ncd docs/build/output-new\\r\\nyarn\\r\\nyarn start\\r\\n```\\r\\n\\r\\n## Appendix\\r\\nExpected sections for the KV-store class docstring.\\r\\n\\r\\n```python\\r\\n \\\"\\\"\\\"__ModuleName__ completion KV-store integration.\\r\\n\\r\\n # TODO: Replace with relevant packages, env vars.\\r\\n Setup:\\r\\n Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``.\\r\\n\\r\\n .. code-block:: bash\\r\\n\\r\\n pip install -U __package_name__\\r\\n export __MODULE_NAME___API_KEY=\\\"your-api-key\\\"\\r\\n\\r\\n # TODO: Populate with relevant params.\\r\\n Key init args \\u2014 client params:\\r\\n api_key: Optional[str]\\r\\n __ModuleName__ API key. If not passed in will be read from env var __MODULE_NAME___API_KEY.\\r\\n\\r\\n See full list of supported init args and their descriptions in the params section.\\r\\n\\r\\n # TODO: Replace with relevant init params.\\r\\n Instantiate:\\r\\n .. code-block:: python\\r\\n\\r\\n from __module_name__ import __ModuleName__ByteStore\\r\\n\\r\\n kv_store = __ModuleName__ByteStore(\\r\\n # api_key=\\\"...\\\",\\r\\n # other params...\\r\\n )\\r\\n\\r\\n Set keys:\\r\\n .. 
code-block:: python\\r\\n\\r\\n kv_pairs = [\\r\\n [\\\"key1\\\", \\\"value1\\\"],\\r\\n [\\\"key2\\\", \\\"value2\\\"],\\r\\n ]\\r\\n\\r\\n kv_store.mset(kv_pairs)\\r\\n\\r\\n .. code-block:: python\\r\\n\\r\\n Get keys:\\r\\n .. code-block:: python\\r\\n\\r\\n kv_store.mget([\\\"key1\\\", \\\"key2\\\"])\\r\\n\\r\\n .. code-block:: python\\r\\n\\r\\n # TODO: Example output.\\r\\n\\r\\n Delete keys:\\r\\n ..code-block:: python\\r\\n\\r\\n kv_store.mdelete([\\\"key1\\\", \\\"key2\\\"])\\r\\n\\r\\n ..code-block:: python\\r\\n \\\"\\\"\\\" # noqa: E501\\r\\n```\", \"comments\": \"[]\", \"opened_by\": \"jacoblee93\"}\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"The title of issue 24888 is \"Standardize KV-Store Docs\".\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"example_query = \"What is the title of issue 24888?\"\n",
|
||||
"\n",
|
||||
"events = agent_executor.stream(\n",
|
||||
" {\"messages\": [(\"user\", example_query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
"for event in events:\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `GithubToolkit` features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.github.toolkit.GitHubToolkit.html)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.1"
|
||||
}
|
||||
 },
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -1,269 +1,267 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Gmail Toolkit\n",
|
||||
"\n",
|
||||
"This will help you getting started with the GMail [toolkit](/docs/concepts/tools/#toolkits). This toolkit interacts with the GMail API to read messages, draft and send messages, and more. For detailed documentation of all GmailToolkit features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.toolkit.GmailToolkit.html).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To use this toolkit, you will need to set up your credentials explained in the [Gmail API docs](https://developers.google.com/gmail/api/quickstart/python#authorize_credentials_for_a_desktop_application). Once you've downloaded the `credentials.json` file, you can start using the Gmail API."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This toolkit lives in the `langchain-google-community` package. We'll need the `gmail` extra:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-google-community\\[gmail\\]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing from runs of individual tools, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"By default the toolkit reads the local `credentials.json` file. You can also manually provide a `Credentials` object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_community import GmailToolkit\n",
|
||||
"\n",
|
||||
"toolkit = GmailToolkit()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Customizing Authentication\n",
|
||||
"\n",
|
||||
"Behind the scenes, a `googleapi` resource is created using the following methods. \n",
|
||||
"you can manually build a `googleapi` resource for more auth control. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_community.gmail.utils import (\n",
|
||||
" build_resource_service,\n",
|
||||
" get_gmail_credentials,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Can review scopes here https://developers.google.com/gmail/api/auth/scopes\n",
|
||||
"# For instance, readonly scope is 'https://www.googleapis.com/auth/gmail.readonly'\n",
|
||||
"credentials = get_gmail_credentials(\n",
|
||||
" token_file=\"token.json\",\n",
|
||||
" scopes=[\"https://mail.google.com/\"],\n",
|
||||
" client_secrets_file=\"credentials.json\",\n",
|
||||
")\n",
|
||||
"api_resource = build_resource_service(credentials=credentials)\n",
|
||||
"toolkit = GmailToolkit(api_resource=api_resource)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"View available tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[GmailCreateDraft(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailSendMessage(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailSearch(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailGetMessage(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailGetThread(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>)]"
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Gmail Toolkit\n",
|
||||
"\n",
|
||||
"This will help you getting started with the GMail [toolkit](/docs/concepts/tools/#toolkits). This toolkit interacts with the GMail API to read messages, draft and send messages, and more. For detailed documentation of all GmailToolkit features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.toolkit.GmailToolkit.html).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To use this toolkit, you will need to set up your credentials explained in the [Gmail API docs](https://developers.google.com/gmail/api/quickstart/python#authorize_credentials_for_a_desktop_application). Once you've downloaded the `credentials.json` file, you can start using the Gmail API."
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"- [GmailCreateDraft](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.create_draft.GmailCreateDraft.html)\n",
|
||||
"- [GmailSendMessage](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.send_message.GmailSendMessage.html)\n",
|
||||
"- [GmailSearch](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.search.GmailSearch.html)\n",
|
||||
"- [GmailGetMessage](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.get_message.GmailGetMessage.html)\n",
|
||||
"- [GmailGetThread](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.get_thread.GmailGetThread.html)"
|
||||
]
|
||||
},
|
||||
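  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal sketch of calling the search tool directly (the tool name and input key are assumptions; the query uses standard Gmail search operators):\n",
    "\n",
    "```python\n",
    "# Look up the search tool by name and run a Gmail query.\n",
    "search = next(t for t in tools if t.name == \"search_gmail\")\n",
    "results = search.invoke({\"query\": \"from:me after:2024/01/01\"})\n",
    "```"
   ]
  },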
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"Below we show how to incorporate the toolkit into an [agent](/docs/tutorials/agents).\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"Draft an email to fake@fake.com thanking them for coffee.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" create_gmail_draft (call_slGkYKZKA6h3Mf1CraUBzs6M)\n",
|
||||
" Call ID: call_slGkYKZKA6h3Mf1CraUBzs6M\n",
|
||||
" Args:\n",
|
||||
" message: Dear Fake,\n",
|
||||
"\n",
|
||||
"I wanted to take a moment to thank you for the coffee yesterday. It was a pleasure catching up with you. Let's do it again soon!\n",
|
||||
"\n",
|
||||
"Best regards,\n",
|
||||
"[Your Name]\n",
|
||||
" to: ['fake@fake.com']\n",
|
||||
" subject: Thank You for the Coffee\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: create_gmail_draft\n",
|
||||
"\n",
|
||||
"Draft created. Draft Id: r-7233782721440261513\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"I have drafted an email to fake@fake.com thanking them for the coffee. You can review and send it from your email draft with the subject \"Thank You for the Coffee\".\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This toolkit lives in the `langchain-google-community` package. We'll need the `gmail` extra:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-google-community\\[gmail\\]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of individual tools, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"By default the toolkit reads the local `credentials.json` file. You can also manually provide a `Credentials` object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_community import GmailToolkit\n",
|
||||
"\n",
|
||||
"toolkit = GmailToolkit()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Customizing Authentication\n",
|
||||
"\n",
|
||||
"Behind the scenes, a `googleapi` resource is created using the following methods. \n",
|
||||
"you can manually build a `googleapi` resource for more auth control. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_community.gmail.utils import (\n",
|
||||
" build_resource_service,\n",
|
||||
" get_gmail_credentials,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Can review scopes here https://developers.google.com/gmail/api/auth/scopes\n",
|
||||
"# For instance, readonly scope is 'https://www.googleapis.com/auth/gmail.readonly'\n",
|
||||
"credentials = get_gmail_credentials(\n",
|
||||
" token_file=\"token.json\",\n",
|
||||
" scopes=[\"https://mail.google.com/\"],\n",
|
||||
" client_secrets_file=\"credentials.json\",\n",
|
||||
")\n",
|
||||
"api_resource = build_resource_service(credentials=credentials)\n",
|
||||
"toolkit = GmailToolkit(api_resource=api_resource)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"View available tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[GmailCreateDraft(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailSendMessage(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailSearch(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailGetMessage(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>),\n",
|
||||
" GmailGetThread(api_resource=<googleapiclient.discovery.Resource object at 0x1094509d0>)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"- [GmailCreateDraft](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.create_draft.GmailCreateDraft.html)\n",
|
||||
"- [GmailSendMessage](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.send_message.GmailSendMessage.html)\n",
|
||||
"- [GmailSearch](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.search.GmailSearch.html)\n",
|
||||
"- [GmailGetMessage](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.get_message.GmailGetMessage.html)\n",
|
||||
"- [GmailGetThread](https://python.langchain.com/api_reference/google_community/gmail/langchain_google_community.gmail.get_thread.GmailGetThread.html)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"Below we show how to incorporate the toolkit into an [agent](/docs/tutorials/agents).\n",
|
||||
"\n",
|
||||
"We will need a LLM or chat model:\n",
|
||||
"\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"llm\" />\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"Draft an email to fake@fake.com thanking them for coffee.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" create_gmail_draft (call_slGkYKZKA6h3Mf1CraUBzs6M)\n",
|
||||
" Call ID: call_slGkYKZKA6h3Mf1CraUBzs6M\n",
|
||||
" Args:\n",
|
||||
" message: Dear Fake,\n",
|
||||
"\n",
|
||||
"I wanted to take a moment to thank you for the coffee yesterday. It was a pleasure catching up with you. Let's do it again soon!\n",
|
||||
"\n",
|
||||
"Best regards,\n",
|
||||
"[Your Name]\n",
|
||||
" to: ['fake@fake.com']\n",
|
||||
" subject: Thank You for the Coffee\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: create_gmail_draft\n",
|
||||
"\n",
|
||||
"Draft created. Draft Id: r-7233782721440261513\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"I have drafted an email to fake@fake.com thanking them for the coffee. You can review and send it from your email draft with the subject \"Thank You for the Coffee\".\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"example_query = \"Draft an email to fake@fake.com thanking them for coffee.\"\n",
|
||||
"\n",
|
||||
"events = agent_executor.stream(\n",
|
||||
" {\"messages\": [(\"user\", example_query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
"for event in events:\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `GmailToolkit` features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.gmail.toolkit.GmailToolkit.html)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
 },
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
217
docs/docs/integrations/tools/goat.ipynb
Normal file
@@ -0,0 +1,217 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: GOAT\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# GOAT\n",
|
||||
"\n",
|
||||
"[GOAT](https://github.com/goat-sdk/goat) is the finance toolkit for AI agents.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"Create agents that can:\n",
|
||||
"\n",
|
||||
"- Send and receive payments\n",
|
||||
"- Purchase physical and digital goods and services\n",
|
||||
"- Engage in various investment strategies:\n",
|
||||
" - Earn yield\n",
|
||||
" - Bet on prediction markets\n",
|
||||
"- Purchase crypto assets\n",
|
||||
"- Tokenize any asset\n",
|
||||
"- Get financial insights\n",
|
||||
"\n",
|
||||
"### How it works\n",
|
||||
"GOAT leverages blockchains, cryptocurrencies (such as stablecoins), and wallets as the infrastructure to enable agents to become economic actors:\n",
|
||||
"\n",
|
||||
"1. Give your agent a [wallet](https://github.com/goat-sdk/goat/tree/main#chains-and-wallets)\n",
|
||||
"2. Allow it to transact [anywhere](https://github.com/goat-sdk/goat/tree/main#chains-and-wallets)\n",
|
||||
"3. Use more than [+200 tools](https://github.com/goat-sdk/goat/tree/main#tools)\n",
|
||||
"\n",
|
||||
"See everything GOAT supports [here](https://github.com/goat-sdk/goat/tree/main#chains-and-wallets).\n",
|
||||
"\n",
|
||||
"**Lightweight and extendable**\n",
|
||||
"Different from other toolkits, GOAT is designed to be lightweight and extendable by keeping its core minimal and allowing you to install only the tools you need.\n",
|
||||
"\n",
|
||||
"If you don't find what you need on our more than 200 integrations you can easily:\n",
|
||||
"\n",
|
||||
"- Create your own plugin\n",
|
||||
"- Integrate a new chain\n",
|
||||
"- Integrate a new wallet\n",
|
||||
"- Integrate a new agent framework\n",
|
||||
"\n",
|
||||
"See how to do it [here](https://github.com/goat-sdk/goat/tree/main#-contributing)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Quickstarts\n",
|
||||
"\n",
|
||||
"The best way to get started is by using the quickstarts below. See how you can configure GOAT to achieve any of the use cases below.\n",
|
||||
"\n",
|
||||
"- **By use case**\n",
|
||||
" - **Money transmission**\n",
|
||||
" - Send and receive payments [[EVM](https://github.com/goat-sdk/goat/tree/main/python/examples/by-use-case/evm-send-and-receive-tokens), [Solana](https://github.com/goat-sdk/goat/tree/main/python/examples/by-use-case/solana-send-and-receive-tokens)]\n",
|
||||
" - **Investing**\n",
|
||||
" - Generate yield [[Solana](https://github.com/goat-sdk/goat/tree/main/python/examples/by-use-case/solana-usdc-yield-deposit)]\n",
|
||||
" - Purchase crypto assets [[EVM](https://github.com/goat-sdk/goat/tree/main/python/examples/by-use-case/evm-swap-tokens), [Solana](https://github.com/goat-sdk/goat/tree/main/python/examples/by-use-case/solana-swap-tokens)]\n",
|
||||
"- **By wallet**\n",
|
||||
" - [Crossmint](https://github.com/goat-sdk/goat/tree/main/python/examples/by-wallet/crossmint)\n",
|
||||
"- **See all python quickstarts [here](https://github.com/goat-sdk/goat/tree/main/python/examples).**\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "abd26764",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"1. Install the core package and langchain adapter:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install goat-sdk goat-sdk-adapter-langchain\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"2. Install the type of wallet you want to use (e.g solana):\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install goat-sdk-wallet-solana\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"3. Install the plugins you want to use in that chain:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install goat-sdk-plugin-spl-token\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our toolkit:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"from goat_adapters.langchain import get_on_chain_tools\n",
|
||||
"from goat_wallets.solana import solana, send_solana\n",
|
||||
"from goat_plugins.spl_token import spl_token, SplTokenPluginOptions\n",
|
||||
"from goat_plugins.spl_token.tokens import SPL_TOKENS\n",
|
||||
"\n",
|
||||
"# Initialize Solana client\n",
|
||||
"client = SolanaClient(os.getenv(\"SOLANA_RPC_ENDPOINT\"))\n",
|
||||
"\n",
|
||||
"# Initialize regular Solana wallet\n",
|
||||
"keypair = Keypair.from_base58_string(os.getenv(\"SOLANA_WALLET_SEED\") or \"\")\n",
|
||||
"wallet = solana(client, keypair)\n",
|
||||
"\n",
|
||||
"tools = get_on_chain_tools(\n",
|
||||
" wallet=wallet,\n",
|
||||
" plugins=[\n",
|
||||
" send_solana(),\n",
|
||||
" spl_token(SplTokenPluginOptions(\n",
|
||||
" network=\"mainnet\", # Using devnet as specified in .env\n",
|
||||
" tokens=SPL_TOKENS\n",
|
||||
" )),\n",
|
||||
" ],\n",
|
||||
" )\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Invocation\n",
|
||||
"```python\n",
|
||||
"tools[\"get_balance\"].invoke({ \"address\": \"0x1234567890123456789012345678901234567890\" })\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import os\n",
|
||||
"import asyncio\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"\n",
|
||||
"# Load environment variables\n",
|
||||
"load_dotenv()\n",
|
||||
"\n",
|
||||
"from solana.rpc.api import Client as SolanaClient\n",
|
||||
"from solders.keypair import Keypair\n",
|
||||
"\n",
|
||||
"from goat_adapters.langchain import get_on_chain_tools\n",
|
||||
"from goat_wallets.solana import solana, send_solana\n",
|
||||
"from goat_plugins.spl_token import spl_token, SplTokenPluginOptions\n",
|
||||
"from goat_plugins.spl_token.tokens import SPL_TOKENS\n",
|
||||
"\n",
|
||||
"# Initialize Solana client\n",
|
||||
"client = SolanaClient(os.getenv(\"SOLANA_RPC_ENDPOINT\"))\n",
|
||||
"\n",
|
||||
"# Initialize regular Solana wallet\n",
|
||||
"keypair = Keypair.from_base58_string(os.getenv(\"SOLANA_WALLET_SEED\") or \"\")\n",
|
||||
"wallet = solana(client, keypair)\n",
|
||||
"\n",
|
||||
"# Initialize LLM\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"def main():\n",
|
||||
" # Initialize tools with Solana wallet\n",
|
||||
" tools = get_on_chain_tools(\n",
|
||||
" wallet=wallet,\n",
|
||||
" plugins=[\n",
|
||||
" send_solana(),\n",
|
||||
" spl_token(SplTokenPluginOptions(\n",
|
||||
" network=\"mainnet\", # Using devnet as specified in .env\n",
|
||||
" tokens=SPL_TOKENS\n",
|
||||
" )),\n",
|
||||
" ],\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Initialize agent\n",
|
||||
" # Your agent code here\n",
|
||||
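    "    # For example (a sketch, assuming the langgraph prebuilt agent used in\n",
    "    # the other toolkit guides; it is not part of the GOAT SDK itself):\n",
    "    # from langgraph.prebuilt import create_react_agent\n",
    "    # agent = create_react_agent(llm, tools)\n",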
"\n",
|
||||
"\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" main()\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"- For a complete list of tools, see the [GOAT SDK documentation](https://github.com/goat-sdk/goat).\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,351 +1,349 @@
|
||||
{
"cells": [
{
"cell_type": "markdown",
"id": "cfe4185a-34dc-4cdc-b831-001954f2d6e8",
"metadata": {},
"source": [
"# Requests Toolkit\n",
"\n",
"We can use the Requests [toolkit](/docs/concepts/tools/#toolkits) to construct agents that generate HTTP requests.\n",
"\n",
"For detailed documentation of all API toolkit features and configurations head to the API reference for [RequestsToolkit](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.openapi.toolkit.RequestsToolkit.html).\n",
"\n",
"## ⚠️ Security note ⚠️\n",
"There are inherent risks in giving models discretion to execute real-world actions. Take precautions to mitigate these risks:\n",
"\n",
"- Make sure that permissions associated with the tools are narrowly-scoped (e.g., for database operations or API requests);\n",
"- When desired, make use of human-in-the-loop workflows."
]
},
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a21a6ca4-d650-4b7d-a944-1a8771b5293a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"- [RequestsGetTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsGetTool.html)\n",
|
||||
"- [RequestsPostTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsPostTool.html)\n",
|
||||
"- [RequestsPatchTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsPatchTool.html)\n",
|
||||
"- [RequestsPutTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsPutTool.html)\n",
|
||||
"- [RequestsDeleteTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsDeleteTool.html)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e2dbb304-abf2-472a-9130-f03150a40549",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "db062da7-f22c-4f36-9df8-1da96c9f7538",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"system_message = \"\"\"\n",
|
||||
"You have access to an API to help answer user queries.\n",
|
||||
"Here is documentation on the API:\n",
|
||||
"{api_spec}\n",
|
||||
"\"\"\".format(api_spec=api_spec)\n",
|
||||
"\n",
|
||||
"agent_executor = create_react_agent(llm, tools, prompt=system_message)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "c1e47be9-374a-457c-928a-48f02b5530e3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"Fetch the top two posts. What are their titles?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" requests_get (call_RV2SOyzCnV5h2sm4WPgG8fND)\n",
|
||||
" Call ID: call_RV2SOyzCnV5h2sm4WPgG8fND\n",
|
||||
" Args:\n",
|
||||
" url: https://jsonplaceholder.typicode.com/posts?_limit=2\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: requests_get\n",
|
||||
"\n",
|
||||
"[\n",
|
||||
" {\n",
|
||||
" \"userId\": 1,\n",
|
||||
" \"id\": 1,\n",
|
||||
" \"title\": \"sunt aut facere repellat provident occaecati excepturi optio reprehenderit\",\n",
|
||||
" \"body\": \"quia et suscipit\\nsuscipit recusandae consequuntur expedita et cum\\nreprehenderit molestiae ut ut quas totam\\nnostrum rerum est autem sunt rem eveniet architecto\"\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"userId\": 1,\n",
|
||||
" \"id\": 2,\n",
|
||||
" \"title\": \"qui est esse\",\n",
|
||||
" \"body\": \"est rerum tempore vitae\\nsequi sint nihil reprehenderit dolor beatae ea dolores neque\\nfugiat blanditiis voluptate porro vel nihil molestiae ut reiciendis\\nqui aperiam non debitis possimus qui neque nisi nulla\"\n",
|
||||
" }\n",
|
||||
"]\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"The titles of the top two posts are:\n",
|
||||
"1. \"sunt aut facere repellat provident occaecati excepturi optio reprehenderit\"\n",
|
||||
"2. \"qui est esse\"\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"id": "d968e982-f370-4614-8469-c1bc71ee3e32",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This toolkit lives in the `langchain-community` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f74f05fb-3f24-4c0b-a17f-cf4edeedbb9a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36a178eb-1f2c-411e-bf25-0240ead4c62a",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of individual tools, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8e68d0cd-6233-481c-b048-e8d95cba4c35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a7e2f64a-a72e-4fef-be52-eaf7c5072d24",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"First we will demonstrate a minimal example.\n",
|
||||
"\n",
|
||||
"**NOTE**: There are inherent risks in giving models discretion to execute real-world actions. We must \"opt-in\" to these risks by setting `allow_dangerous_request=True` to use these tools.\n",
|
||||
"**This can be dangerous for calling unwanted requests**. Please make sure your custom OpenAPI spec (yaml) is safe and that permissions associated with the tools are narrowly-scoped."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "018bd070-9fc8-459b-8d28-b4a3e283e640",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ALLOW_DANGEROUS_REQUEST = True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a024f7b3-5437-4878-bd16-c4783bff394c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can use the [JSONPlaceholder](https://jsonplaceholder.typicode.com) API as a testing ground.\n",
|
||||
"\n",
|
||||
"Let's create (a subset of) its API spec:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "2dcbcf92-2ad5-49c3-94ac-91047ccc8c5b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, Dict, Union\n",
|
||||
"\n",
|
||||
"import requests\n",
|
||||
"import yaml\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _get_schema(response_json: Union[dict, list]) -> dict:\n",
|
||||
" if isinstance(response_json, list):\n",
|
||||
" response_json = response_json[0] if response_json else {}\n",
|
||||
" return {key: type(value).__name__ for key, value in response_json.items()}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def _get_api_spec() -> str:\n",
|
||||
" base_url = \"https://jsonplaceholder.typicode.com\"\n",
|
||||
" endpoints = [\n",
|
||||
" \"/posts\",\n",
|
||||
" \"/comments\",\n",
|
||||
" ]\n",
|
||||
" common_query_parameters = [\n",
|
||||
" {\n",
|
||||
" \"name\": \"_limit\",\n",
|
||||
" \"in\": \"query\",\n",
|
||||
" \"required\": False,\n",
|
||||
" \"schema\": {\"type\": \"integer\", \"example\": 2},\n",
|
||||
" \"description\": \"Limit the number of results\",\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" openapi_spec: Dict[str, Any] = {\n",
|
||||
" \"openapi\": \"3.0.0\",\n",
|
||||
" \"info\": {\"title\": \"JSONPlaceholder API\", \"version\": \"1.0.0\"},\n",
|
||||
" \"servers\": [{\"url\": base_url}],\n",
|
||||
" \"paths\": {},\n",
|
||||
" }\n",
|
||||
" # Iterate over the endpoints to construct the paths\n",
|
||||
" for endpoint in endpoints:\n",
|
||||
" response = requests.get(base_url + endpoint)\n",
|
||||
" if response.status_code == 200:\n",
|
||||
" schema = _get_schema(response.json())\n",
|
||||
" openapi_spec[\"paths\"][endpoint] = {\n",
|
||||
" \"get\": {\n",
|
||||
" \"summary\": f\"Get {endpoint[1:]}\",\n",
|
||||
" \"parameters\": common_query_parameters,\n",
|
||||
" \"responses\": {\n",
|
||||
" \"200\": {\n",
|
||||
" \"description\": \"Successful response\",\n",
|
||||
" \"content\": {\n",
|
||||
" \"application/json\": {\n",
|
||||
" \"schema\": {\"type\": \"object\", \"properties\": schema}\n",
|
||||
" }\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" },\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
" return yaml.dump(openapi_spec, sort_keys=False)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"api_spec = _get_api_spec()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "db3d6148-ae65-4a1d-91a6-59ee3e4e6efa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next we can instantiate the toolkit. We require no authorization or other headers for this API:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "63a630b3-45bb-4525-865b-083f322b944b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit\n",
|
||||
"from langchain_community.utilities.requests import TextRequestsWrapper\n",
|
||||
"\n",
|
||||
"toolkit = RequestsToolkit(\n",
|
||||
" requests_wrapper=TextRequestsWrapper(headers={}),\n",
|
||||
" allow_dangerous_requests=ALLOW_DANGEROUS_REQUEST,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f4224a64-843a-479d-8a7b-84719e4b9d0c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"View available tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "70ea0f4e-9f10-4906-894b-08df832fd515",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[RequestsGetTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),\n",
|
||||
" RequestsPostTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),\n",
|
||||
" RequestsPatchTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),\n",
|
||||
" RequestsPutTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True),\n",
|
||||
" RequestsDeleteTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a21a6ca4-d650-4b7d-a944-1a8771b5293a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"- [RequestsGetTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsGetTool.html)\n",
|
||||
"- [RequestsPostTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsPostTool.html)\n",
|
||||
"- [RequestsPatchTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsPatchTool.html)\n",
|
||||
"- [RequestsPutTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsPutTool.html)\n",
|
||||
"- [RequestsDeleteTool](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.requests.tool.RequestsDeleteTool.html)"
|
||||
]
|
||||
},
|
||||
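{
"cell_type": "markdown",
"id": "direct-tool-invocation-md",
"metadata": {},
"source": [
"Each tool can also be invoked directly. A minimal sketch (assuming the GET tool accepts a `url` argument, as the agent trace further below suggests; the cell id and URL here are illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "direct-tool-invocation-code",
"metadata": {},
"outputs": [],
"source": [
"# Invoke the first (GET) tool directly against the JSONPlaceholder API\n",
"requests_get_tool = tools[0]\n",
"requests_get_tool.invoke({\"url\": \"https://jsonplaceholder.typicode.com/posts?_limit=2\"})"
]
},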
{
"cell_type": "markdown",
"id": "e2dbb304-abf2-472a-9130-f03150a40549",
"metadata": {},
"source": [
"## Use within an agent"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "db062da7-f22c-4f36-9df8-1da96c9f7538",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"system_message = \"\"\"\n",
"You have access to an API to help answer user queries.\n",
"Here is documentation on the API:\n",
"{api_spec}\n",
"\"\"\".format(api_spec=api_spec)\n",
"\n",
"agent_executor = create_react_agent(llm, tools, prompt=system_message)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c1e47be9-374a-457c-928a-48f02b5530e3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Fetch the top two posts. What are their titles?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
"  requests_get (call_RV2SOyzCnV5h2sm4WPgG8fND)\n",
" Call ID: call_RV2SOyzCnV5h2sm4WPgG8fND\n",
"  Args:\n",
"    url: https://jsonplaceholder.typicode.com/posts?_limit=2\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
"Name: requests_get\n",
"\n",
"[\n",
"  {\n",
"    \"userId\": 1,\n",
"    \"id\": 1,\n",
"    \"title\": \"sunt aut facere repellat provident occaecati excepturi optio reprehenderit\",\n",
"    \"body\": \"quia et suscipit\\nsuscipit recusandae consequuntur expedita et cum\\nreprehenderit molestiae ut ut quas totam\\nnostrum rerum est autem sunt rem eveniet architecto\"\n",
"  },\n",
"  {\n",
"    \"userId\": 1,\n",
"    \"id\": 2,\n",
"    \"title\": \"qui est esse\",\n",
"    \"body\": \"est rerum tempore vitae\\nsequi sint nihil reprehenderit dolor beatae ea dolores neque\\nfugiat blanditiis voluptate porro vel nihil molestiae ut reiciendis\\nqui aperiam non debitis possimus qui neque nisi nulla\"\n",
"  }\n",
"]\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"The titles of the top two posts are:\n",
"1. \"sunt aut facere repellat provident occaecati excepturi optio reprehenderit\"\n",
"2. \"qui est esse\"\n"
]
}
],
"source": [
"example_query = \"Fetch the top two posts. What are their titles?\"\n",
"\n",
"events = agent_executor.stream(\n",
"    {\"messages\": [(\"user\", example_query)]},\n",
"    stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
"    event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "01ec4886-de3d-4fda-bd05-e3f254810969",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all API toolkit features and configurations head to the API reference for [RequestsToolkit](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.openapi.toolkit.RequestsToolkit.html)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,274 +1,272 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Slack Toolkit\n",
"\n",
"This will help you get started with the Slack [toolkit](/docs/concepts/tools/#toolkits). For detailed documentation of all SlackToolkit features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.slack.toolkit.SlackToolkit.html).\n",
"\n",
"## Setup\n",
"\n",
"To use this toolkit, you will need to get a token as explained in the [Slack API docs](https://api.slack.com/tutorials/tracks/getting-a-token). Once you've received a SLACK_USER_TOKEN, you can input it as an environment variable below."
]
},
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This toolkit loads:\n",
|
||||
"\n",
|
||||
"- [SlackGetChannel](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.get_channel.SlackGetChannel.html)\n",
|
||||
"- [SlackGetMessage](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.get_message.SlackGetMessage.html)\n",
|
||||
"- [SlackScheduleMessage](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.schedule_message.SlackScheduleMessage.html)\n",
|
||||
"- [SlackSendMessage](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.send_message.SlackSendMessage.html)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within an agent\n",
|
||||
"\n",
|
||||
"Let's equip an agent with the Slack toolkit and query for information about a channel."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"agent_executor = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"When was the #general channel created?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" get_channelid_name_dict (call_NXDkALjoOx97uF1v0CoZTqtJ)\n",
|
||||
" Call ID: call_NXDkALjoOx97uF1v0CoZTqtJ\n",
|
||||
" Args:\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"The #general channel was created on timestamp 1671043305.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"example_query = \"When was the #general channel created?\"\n",
|
||||
"\n",
|
||||
"events = agent_executor.stream(\n",
|
||||
" {\"messages\": [(\"user\", example_query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
")\n",
|
||||
"for event in events:\n",
|
||||
" message = event[\"messages\"][-1]\n",
|
||||
" if message.type != \"tool\": # mask sensitive information\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"SLACK_USER_TOKEN\"):\n",
|
||||
" os.environ[\"SLACK_USER_TOKEN\"] = getpass.getpass(\"Enter your Slack user token: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"Send a friendly greeting to channel C072Q1LP4QM.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" send_message (call_xQxpv4wFeAZNZgSBJRIuaizi)\n",
|
||||
" Call ID: call_xQxpv4wFeAZNZgSBJRIuaizi\n",
|
||||
" Args:\n",
|
||||
" message: Hello! Have a great day!\n",
|
||||
" channel: C072Q1LP4QM\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"I have sent a friendly greeting to the channel C072Q1LP4QM.\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of individual tools, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"This toolkit lives in the `langchain-community` package. We will also need the Slack SDK:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community slack_sdk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Optionally, we can install beautifulsoup4 to assist in parsing HTML messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU beautifulsoup4 # This is optional but is useful for parsing HTML messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our toolkit:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.agent_toolkits import SlackToolkit\n",
|
||||
"\n",
|
||||
"toolkit = SlackToolkit()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Tools\n",
|
||||
"\n",
|
||||
"View available tools:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SlackGetChannel(client=<slack_sdk.web.client.WebClient object at 0x113caa8c0>),\n",
|
||||
" SlackGetMessage(client=<slack_sdk.web.client.WebClient object at 0x113caa4d0>),\n",
|
||||
" SlackScheduleMessage(client=<slack_sdk.web.client.WebClient object at 0x113caa440>),\n",
|
||||
" SlackSendMessage(client=<slack_sdk.web.client.WebClient object at 0x113caa410>)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tools = toolkit.get_tools()\n",
|
||||
"\n",
|
||||
"tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This toolkit loads:\n",
|
||||
"\n",
|
||||
"- [SlackGetChannel](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.get_channel.SlackGetChannel.html)\n",
|
||||
"- [SlackGetMessage](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.get_message.SlackGetMessage.html)\n",
|
||||
"- [SlackScheduleMessage](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.schedule_message.SlackScheduleMessage.html)\n",
|
||||
"- [SlackSendMessage](https://python.langchain.com/api_reference/community/tools/langchain_community.tools.slack.send_message.SlackSendMessage.html)"
|
||||
]
|
||||
},
|
||||
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use within an agent\n",
"\n",
"Let's equip an agent with the Slack toolkit and query for information about a channel."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"agent_executor = create_react_agent(llm, tools)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"When was the #general channel created?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
"  get_channelid_name_dict (call_NXDkALjoOx97uF1v0CoZTqtJ)\n",
" Call ID: call_NXDkALjoOx97uF1v0CoZTqtJ\n",
"  Args:\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"The #general channel was created on timestamp 1671043305.\n"
]
}
],
"source": [
"example_query = \"When was the #general channel created?\"\n",
"\n",
"events = agent_executor.stream(\n",
"    {\"messages\": [(\"user\", example_query)]},\n",
"    stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
"    message = event[\"messages\"][-1]\n",
"    if message.type != \"tool\":  # mask sensitive information\n",
"        event[\"messages\"][-1].pretty_print()"
]
},
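{
"cell_type": "markdown",
"metadata": {},
"source": [
"The tool reports the creation time as a Unix timestamp. A small sketch to render it as a readable UTC date:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from datetime import datetime, timezone\n",
"\n",
"# Convert the Unix timestamp from the example above into a UTC datetime\n",
"print(datetime.fromtimestamp(1671043305, tz=timezone.utc))"
]
},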
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"Send a friendly greeting to channel C072Q1LP4QM.\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
"  send_message (call_xQxpv4wFeAZNZgSBJRIuaizi)\n",
" Call ID: call_xQxpv4wFeAZNZgSBJRIuaizi\n",
"  Args:\n",
"    message: Hello! Have a great day!\n",
"    channel: C072Q1LP4QM\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"I have sent a friendly greeting to the channel C072Q1LP4QM.\n"
]
}
],
"source": [
"example_query = \"Send a friendly greeting to channel C072Q1LP4QM.\"\n",
"\n",
"events = agent_executor.stream(\n",
"    {\"messages\": [(\"user\", example_query)]},\n",
"    stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
"    message = event[\"messages\"][-1]\n",
"    if message.type != \"tool\":  # mask sensitive information\n",
"        event[\"messages\"][-1].pretty_print()"
]
},
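{
"cell_type": "markdown",
"metadata": {},
"source": [
"The toolkit also includes a scheduling tool; the same streaming pattern works for queries like the following (a sketch; the query and channel ID are illustrative, and the agent must resolve the time itself):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical query exercising SlackScheduleMessage\n",
"example_query = \"Schedule a 'standup reminder' message to channel C072Q1LP4QM tomorrow at 9am.\"\n",
"\n",
"events = agent_executor.stream(\n",
"    {\"messages\": [(\"user\", example_query)]},\n",
"    stream_mode=\"values\",\n",
")\n",
"for event in events:\n",
"    message = event[\"messages\"][-1]\n",
"    if message.type != \"tool\":  # mask sensitive information\n",
"        event[\"messages\"][-1].pretty_print()"
]
},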
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `SlackToolkit` features and configurations head to the [API reference](https://python.langchain.com/api_reference/community/agent_toolkits/langchain_community.agent_toolkits.slack.toolkit.SlackToolkit.html)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
File diff suppressed because it is too large
@@ -123,7 +123,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, Query and retrive data"
"Finally, Query and retrieve data"
]
},
{
@@ -462,6 +462,475 @@
"retriever.invoke(\"Stealing from the bank is a crime\")"
]
},
{
"cell_type": "markdown",
"id": "5828dda5",
"metadata": {},
"source": [
"## Distance Similarity Algorithm\n",
"\n",
"Elasticsearch supports the following vector distance similarity algorithms:\n",
"\n",
"- cosine\n",
"- euclidean\n",
"- dot_product\n",
"\n",
"The cosine similarity algorithm is the default.\n",
"\n",
"You can specify the similarity algorithm you need via the `similarity` parameter.\n",
"\n",
"**NOTE**: Depending on the retrieval strategy, the similarity algorithm cannot be changed at query time. It must be set when creating the index mapping for the field. If you need to change the similarity algorithm, you need to delete the index and recreate it with the correct `distance_strategy`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cec8b2ac",
"metadata": {},
"outputs": [],
"source": [
"db = ElasticsearchStore.from_documents(\n",
"    docs,\n",
"    embeddings,\n",
"    es_url=\"http://localhost:9200\",\n",
"    index_name=\"test\",\n",
"    distance_strategy=\"COSINE\",\n",
"    # distance_strategy=\"EUCLIDEAN_DISTANCE\"\n",
"    # distance_strategy=\"DOT_PRODUCT\"\n",
")"
]
},
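{
"cell_type": "markdown",
"id": "recreate-index-sketch-md",
"metadata": {},
"source": [
"As noted above, changing the similarity algorithm means recreating the index. A minimal sketch of that, using the Elasticsearch client exposed by the store (assumes the `test` index and the `docs`/`embeddings` objects from the example above; `ignore_unavailable` is assumed to be supported by your client version):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "recreate-index-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Delete the old index, then re-ingest with the new distance_strategy\n",
"db.client.indices.delete(index=\"test\", ignore_unavailable=True)\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
"    docs,\n",
"    embeddings,\n",
"    es_url=\"http://localhost:9200\",\n",
"    index_name=\"test\",\n",
"    distance_strategy=\"DOT_PRODUCT\",\n",
")"
]
},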
{
"cell_type": "markdown",
"id": "0c9fb8a0",
"metadata": {},
"source": [
"## Retrieval Strategies\n",
"\n",
"Elasticsearch has big advantages over other vector-only databases owing to its ability to support a wide range of retrieval strategies. In this notebook we will configure `ElasticsearchStore` to support some of the most common retrieval strategies.\n",
"\n",
"By default, `ElasticsearchStore` uses the `DenseVectorStrategy` (which was called `ApproxRetrievalStrategy` prior to version 0.2.0).\n",
"\n",
"### DenseVectorStrategy\n",
"\n",
"This will return the top k most similar vectors to the query vector. The `k` parameter is passed at search time; in the example below it is set to 10."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d59a493",
"metadata": {},
"outputs": [],
"source": [
"from langchain_elasticsearch import DenseVectorStrategy\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
"    docs,\n",
"    embeddings,\n",
"    es_url=\"http://localhost:9200\",\n",
"    index_name=\"test\",\n",
"    strategy=DenseVectorStrategy(),\n",
")\n",
"\n",
"docs = db.similarity_search(\n",
"    query=\"What did the president say about Ketanji Brown Jackson?\", k=10\n",
")"
]
},
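{
"cell_type": "markdown",
"id": "as-retriever-sketch-md",
"metadata": {},
"source": [
"The store can also be wrapped as a retriever via the standard `as_retriever` interface; a short sketch (assumes the `db` from the cell above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "as-retriever-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Expose the vector store as a retriever returning the top 4 documents\n",
"retriever = db.as_retriever(search_kwargs={\"k\": 4})\n",
"retriever.invoke(\"What did the president say about Ketanji Brown Jackson?\")"
]
},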
{
"cell_type": "markdown",
"id": "0cf5d3d2",
"metadata": {},
"source": [
"#### Example: Hybrid retrieval with dense vector and keyword search\n",
"\n",
"This example will show how to configure ElasticsearchStore to perform a hybrid retrieval, using a combination of approximate semantic search and keyword based search.\n",
"\n",
"We use RRF to balance the two scores from different retrieval methods.\n",
"\n",
"To enable hybrid retrieval, we need to set `hybrid=True` in the `DenseVectorStrategy` constructor."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "109f992a",
"metadata": {},
"outputs": [],
"source": [
"db = ElasticsearchStore.from_documents(\n",
"    docs,\n",
"    embeddings,\n",
"    es_url=\"http://localhost:9200\",\n",
"    index_name=\"test\",\n",
"    strategy=DenseVectorStrategy(hybrid=True),\n",
")"
]
},
{
"cell_type": "markdown",
"id": "b6e62ef0",
"metadata": {},
"source": [
"When hybrid is enabled, the query performed will be a combination of approximate semantic search and keyword based search.\n",
"\n",
"It will use rrf (Reciprocal Rank Fusion) to balance the two scores from different retrieval methods.\n",
"\n",
"**Note**: RRF requires Elasticsearch 8.9.0 or above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9c07444e",
"metadata": {},
"outputs": [],
"source": [
"{\n",
"    \"retriever\": {\n",
"        \"rrf\": {\n",
"            \"retrievers\": [\n",
"                {\n",
"                    \"standard\": {\n",
"                        \"query\": {\n",
"                            \"bool\": {\n",
"                                \"filter\": [],\n",
"                                \"must\": [{\"match\": {\"text\": {\"query\": \"foo\"}}}],\n",
"                            }\n",
"                        },\n",
"                    },\n",
"                },\n",
"                {\n",
"                    \"knn\": {\n",
"                        \"field\": \"vector\",\n",
"                        \"filter\": [],\n",
"                        \"k\": 1,\n",
"                        \"num_candidates\": 50,\n",
"                        \"query_vector\": [1.0, ..., 0.0],\n",
"                    },\n",
"                },\n",
"            ]\n",
"        }\n",
"    }\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "2822fbf7",
"metadata": {},
"source": [
"#### Example: Dense vector search with Embedding Model in Elasticsearch\n",
"\n",
"This example will show how to configure `ElasticsearchStore` to use the embedding model deployed in Elasticsearch for dense vector retrieval.\n",
"\n",
"To use this, specify the model_id in the `DenseVectorStrategy` constructor via the `query_model_id` argument.\n",
"\n",
"**NOTE**: This requires the model to be deployed and running in an Elasticsearch ML node. See [notebook example](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/hugging-face/loading-model-from-hugging-face.ipynb) on how to deploy the model with `eland`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d97d9db4",
"metadata": {},
"outputs": [],
"source": [
"DENSE_SELF_DEPLOYED_INDEX_NAME = \"test-dense-self-deployed\"\n",
"\n",
"# Note: This does not have an embedding function specified\n",
"# Instead, we will use the embedding model deployed in Elasticsearch\n",
"db = ElasticsearchStore(\n",
"    es_cloud_id=\"<your cloud id>\",\n",
"    es_user=\"elastic\",\n",
"    es_password=\"<your password>\",\n",
"    index_name=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
"    query_field=\"text_field\",\n",
"    vector_query_field=\"vector_query_field.predicted_value\",\n",
"    strategy=DenseVectorStrategy(model_id=\"sentence-transformers__all-minilm-l6-v2\"),\n",
")\n",
"\n",
"# Set up an ingest pipeline to perform the embedding\n",
"# of the text field\n",
"db.client.ingest.put_pipeline(\n",
"    id=\"test_pipeline\",\n",
"    processors=[\n",
"        {\n",
"            \"inference\": {\n",
"                \"model_id\": \"sentence-transformers__all-minilm-l6-v2\",\n",
"                \"field_map\": {\"query_field\": \"text_field\"},\n",
"                \"target_field\": \"vector_query_field\",\n",
"            }\n",
"        }\n",
"    ],\n",
")\n",
"\n",
"# Creating a new index with the pipeline,\n",
"# not relying on langchain to create the index\n",
"db.client.indices.create(\n",
"    index=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
"    mappings={\n",
"        \"properties\": {\n",
"            \"text_field\": {\"type\": \"text\"},\n",
"            \"vector_query_field\": {\n",
"                \"properties\": {\n",
"                    \"predicted_value\": {\n",
"                        \"type\": \"dense_vector\",\n",
"                        \"dims\": 384,\n",
"                        \"index\": True,\n",
"                        \"similarity\": \"l2_norm\",\n",
"                    }\n",
"                }\n",
"            },\n",
"        }\n",
"    },\n",
"    settings={\"index\": {\"default_pipeline\": \"test_pipeline\"}},\n",
")\n",
"\n",
"db.from_texts(\n",
"    [\"hello world\"],\n",
"    es_cloud_id=\"<cloud id>\",\n",
"    es_user=\"elastic\",\n",
"    es_password=\"<cloud password>\",\n",
"    index_name=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
"    query_field=\"text_field\",\n",
"    vector_query_field=\"vector_query_field.predicted_value\",\n",
"    strategy=DenseVectorStrategy(model_id=\"sentence-transformers__all-minilm-l6-v2\"),\n",
")\n",
"\n",
"# Perform search\n",
"db.similarity_search(\"hello world\", k=10)"
]
},
{
"cell_type": "markdown",
"id": "b9651b01",
"metadata": {},
"source": [
"### SparseVectorStrategy (ELSER)\n",
"\n",
"This strategy uses Elasticsearch's sparse vector retrieval to retrieve the top-k results. We only support our own \"ELSER\" embedding model for now.\n",
"\n",
"**NOTE**: This requires the ELSER model to be deployed and running in an Elasticsearch ML node.\n",
"\n",
"To use this, specify `SparseVectorStrategy` (which was called `SparseVectorRetrievalStrategy` prior to version 0.2.0) in the `ElasticsearchStore` constructor. You will need to provide a model ID."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c750ff57",
"metadata": {},
"outputs": [],
"source": [
"from langchain_elasticsearch import SparseVectorStrategy\n",
"\n",
"# Note that this example doesn't have an embedding function. This is because we infer the tokens at index time and at query time within Elasticsearch.\n",
"# This requires the ELSER model to be loaded and running in Elasticsearch.\n",
"db = ElasticsearchStore.from_documents(\n",
"    docs,\n",
"    es_cloud_id=\"<cloud id>\",\n",
"    es_user=\"elastic\",\n",
"    es_password=\"<cloud password>\",\n",
"    index_name=\"test-elser\",\n",
"    strategy=SparseVectorStrategy(model_id=\".elser_model_2\"),\n",
")\n",
"\n",
"db.client.indices.refresh(index=\"test-elser\")\n",
"\n",
"results = db.similarity_search(\n",
"    \"What did the president say about Ketanji Brown Jackson\", k=4\n",
")\n",
"print(results[0])"
]
},
{
|
||||
"cell_type": "markdown",
|
||||
"id": "416e224e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### DenseVectorScriptScoreStrategy\n",
|
||||
"\n",
|
||||
"This strategy uses Elasticsearch's script score query to perform exact vector retrieval (also known as brute force) to retrieve the top-k results. (This strategy was called `ExactRetrievalStrategy` prior to version 0.2.0.)\n",
|
||||
"\n",
|
||||
"To use this, specify `DenseVectorScriptScoreStrategy` in `ElasticsearchStore` constructor."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ced32701",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_elasticsearch import SparseVectorStrategy\n",
|
||||
"\n",
|
||||
"db = ElasticsearchStore.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" es_url=\"http://localhost:9200\",\n",
|
||||
" index_name=\"test\",\n",
|
||||
" strategy=DenseVectorScriptScoreStrategy(),\n",
|
||||
")"
|
||||
]
|
||||
},
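A quick follow-up query may help here, since the cell above only constructs the store. This is an illustrative sketch, not part of the original notebook; it assumes `db` was created as in the cell above:

```python
# Script-score retrieval compares the query vector against every document vector,
# so results are exact (brute force) but slower on large indices.
results = db.similarity_search(
    "What did the president say about Ketanji Brown Jackson", k=4
)
print(results[0])
```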
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "92c9cc33",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### BM25Strategy\n",
|
||||
"\n",
|
||||
"Finally, you can use full-text keyword search.\n",
|
||||
"\n",
|
||||
"To use this, specify `BM25Strategy` in `ElasticsearchStore` constructor."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9fd59f69",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_elasticsearch import BM25Strategy\n",
|
||||
"\n",
|
||||
"db = ElasticsearchStore.from_documents(\n",
|
||||
" docs,\n",
|
||||
" es_url=\"http://localhost:9200\",\n",
|
||||
" index_name=\"test\",\n",
|
||||
" strategy=BM25Strategy(),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6556d3c6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### BM25RetrievalStrategy\n",
|
||||
"\n",
|
||||
"This strategy allows the user to perform searches using pure BM25 without vector search.\n",
|
||||
"\n",
|
||||
"To use this, specify `BM25RetrievalStrategy` in `ElasticsearchStore` constructor.\n",
|
||||
"\n",
|
||||
"Note that in the example below, the embedding option is not specified, indicating that the search is conducted without using embeddings.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "478af4bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_elasticsearch import ElasticsearchStore\n",
|
||||
"\n",
|
||||
"db = ElasticsearchStore(\n",
|
||||
" es_url=\"http://localhost:9200\",\n",
|
||||
" index_name=\"test_index\",\n",
|
||||
" strategy=ElasticsearchStore.BM25RetrievalStrategy(),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"db.add_texts(\n",
|
||||
" [\"foo\", \"foo bar\", \"foo bar baz\", \"bar\", \"bar baz\", \"baz\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"results = db.similarity_search(query=\"foo\", k=10)\n",
|
||||
"print(results)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ed899034",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Customise the Query\n",
|
||||
"\n",
|
||||
"With `custom_query` parameter at search, you are able to adjust the query that is used to retrieve documents from Elasticsearch. This is useful if you want to use a more complex query, to support linear boosting of fields.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e0ab7c94",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Example of a custom query thats just doing a BM25 search on the text field.\n",
|
||||
"def custom_query(query_body: dict, query: str):\n",
|
||||
" \"\"\"Custom query to be used in Elasticsearch.\n",
|
||||
" Args:\n",
|
||||
" query_body (dict): Elasticsearch query body.\n",
|
||||
" query (str): Query string.\n",
|
||||
" Returns:\n",
|
||||
" dict: Elasticsearch query body.\n",
|
||||
" \"\"\"\n",
|
||||
" print(\"Query Retriever created by the retrieval strategy:\")\n",
|
||||
" print(query_body)\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
" new_query_body = {\"query\": {\"match\": {\"text\": query}}}\n",
|
||||
"\n",
|
||||
" print(\"Query thats actually used in Elasticsearch:\")\n",
|
||||
" print(new_query_body)\n",
|
||||
" print()\n",
|
||||
"\n",
|
||||
" return new_query_body\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"results = db.similarity_search(\n",
|
||||
" \"What did the president say about Ketanji Brown Jackson\",\n",
|
||||
" k=4,\n",
|
||||
" custom_query=custom_query,\n",
|
||||
")\n",
|
||||
"print(\"Results:\")\n",
|
||||
"print(results[0])"
|
||||
]
|
||||
},
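The section above mentions linear boosting of fields, which the example does not actually demonstrate. Here is a minimal sketch of a boosting `custom_query`, assuming the index has `title` and `text` fields (hypothetical names; adjust them to your own mapping):

```python
def boosted_query(query_body: dict, query: str) -> dict:
    """Replace the generated query with a multi_match that weights `title` 3x over `text`."""
    return {
        "query": {
            "multi_match": {
                "query": query,
                # "^3" applies a linear boost: matches in `title` count three times as much
                "fields": ["title^3", "text"],
            }
        }
    }


results = db.similarity_search(
    "What did the president say about Ketanji Brown Jackson",
    k=4,
    custom_query=boosted_query,
)
```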
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "15ebbe22",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Customize the Document Builder\n",
|
||||
"\n",
|
||||
"With `doc_builder` parameter at search, you are able to adjust how a Document is being built using data retrieved from Elasticsearch. This is especially useful if you have indices which were not created using Langchain.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4cf81750",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Dict\n",
|
||||
"\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def custom_document_builder(hit: Dict) -> Document:\n",
|
||||
" src = hit.get(\"_source\", {})\n",
|
||||
" return Document(\n",
|
||||
" page_content=src.get(\"content\", \"Missing content!\"),\n",
|
||||
" metadata={\n",
|
||||
" \"page_number\": src.get(\"page_number\", -1),\n",
|
||||
" \"original_filename\": src.get(\"original_filename\", \"Missing filename!\"),\n",
|
||||
" },\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"results = db.similarity_search(\n",
|
||||
" \"What did the president say about Ketanji Brown Jackson\",\n",
|
||||
" k=4,\n",
|
||||
" doc_builder=custom_document_builder,\n",
|
||||
")\n",
|
||||
"print(\"Results:\")\n",
|
||||
"print(results[0])"
|
||||
]
|
||||
},
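Note that the `content`, `page_number`, and `original_filename` fields in the builder above are illustrative; adjust them to match the `_source` fields of your own index mapping.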
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17b509ae",
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -436,7 +436,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"##delete and get function need to maintian docids\n",
|
||||
"##delete and get function need to maintain docids\n",
|
||||
"##your docid\n",
|
||||
"\n",
|
||||
"res_d = vearch_standalone.delete(\n",
|
||||
|
||||
@@ -8,20 +8,35 @@
|
||||
"# Vectara\n",
|
||||
"\n",
|
||||
"[Vectara](https://vectara.com/) is the trusted AI Assistant and Agent platform which focuses on enterprise readiness for mission-critical applications.\n",
|
||||
"\n",
|
||||
"Vectara serverless RAG-as-a-service provides all the components of RAG behind an easy-to-use API, including:\n",
|
||||
"1. A way to extract text from files (PDF, PPT, DOCX, etc)\n",
|
||||
"2. ML-based chunking that provides state of the art performance.\n",
|
||||
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
|
||||
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments (including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking). \n",
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking). \n",
|
||||
"6. An LLM to for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"\n",
|
||||
"See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n",
|
||||
"For more information:\n",
|
||||
"- [Documentation](https://docs.vectara.com/docs/)\n",
|
||||
"- [API Playground](https://docs.vectara.com/docs/rest-api/)\n",
|
||||
"- [Quickstart](https://docs.vectara.com/docs/quickstart)\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the basic retrieval functionality, when utilizing Vectara just as a Vector Store (without summarization), incuding: `similarity_search` and `similarity_search_with_score` as well as using the LangChain `as_retriever` functionality.\n",
|
||||
"\n",
|
||||
"You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration"
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To use the `VectaraVectorStore` you first need to install the partner package.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dfdf03ba-d6f5-4b1e-86d3-a65c4bc99aa1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!uv pip install -U pip && uv pip install -qU langchain-vectara"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -32,14 +47,14 @@
|
||||
"# Getting Started\n",
|
||||
"\n",
|
||||
"To get started, use the following steps:\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||
"1. If you don't already have one, [Sign up](https://www.vectara.com/integrations/langchain) for your free Vectara trial.\n",
|
||||
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Access Control\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query-only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||
"\n",
|
||||
"To use LangChain with Vectara, you'll need to have these three values: `customer ID`, `corpus ID` and `api_key`.\n",
|
||||
"You can provide those to LangChain in two ways:\n",
|
||||
"To use LangChain with Vectara, you'll need to have these two values: `corpus_key` and `api_key`.\n",
|
||||
"You can provide `VECTARA_API_KEY` to LangChain in two ways:\n",
|
||||
"\n",
|
||||
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||
"1. Include in your environment these two variables: `VECTARA_API_KEY`.\n",
|
||||
"\n",
|
||||
" For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||
"\n",
|
||||
@@ -47,8 +62,6 @@
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = getpass.getpass(\"Vectara Customer ID:\")\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = getpass.getpass(\"Vectara Corpus ID:\")\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
@@ -56,10 +69,8 @@
|
||||
"\n",
|
||||
"```python\n",
|
||||
"vectara = Vectara(\n",
|
||||
" vectara_customer_id=vectara_customer_id,\n",
|
||||
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
" )\n",
|
||||
" vectara_api_key=vectara_api_key\n",
|
||||
")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"In this notebook we assume they are provided in the environment."
|
||||
@@ -67,23 +78,29 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "aac7a9a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = \"<YOUR_VECTARA_API_KEY>\"\n",
|
||||
"os.environ[\"VECTARA_CORPUS_ID\"] = \"<YOUR_VECTARA_CORPUS_ID>\"\n",
|
||||
"os.environ[\"VECTARA_CUSTOMER_ID\"] = \"<YOUR_VECTARA_CUSTOMER_ID>\"\n",
|
||||
"os.environ[\"VECTARA_API_KEY\"] = \"<VECTARA_API_KEY>\"\n",
|
||||
"os.environ[\"VECTARA_CORPUS_KEY\"] = \"VECTARA_CORPUS_KEY\"\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import Vectara\n",
|
||||
"from langchain_community.vectorstores.vectara import (\n",
|
||||
" RerankConfig,\n",
|
||||
" SummaryConfig,\n",
|
||||
"from langchain_vectara import Vectara\n",
|
||||
"from langchain_vectara.vectorstores import (\n",
|
||||
" ChainReranker,\n",
|
||||
" CorpusConfig,\n",
|
||||
" CustomerSpecificReranker,\n",
|
||||
" File,\n",
|
||||
" GenerationConfig,\n",
|
||||
" MmrReranker,\n",
|
||||
" SearchConfig,\n",
|
||||
" VectaraQueryConfig,\n",
|
||||
")"
|
||||
")\n",
|
||||
"\n",
|
||||
"vectara = Vectara(vectara_api_key=os.getenv(\"VECTARA_API_KEY\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -91,21 +108,37 @@
|
||||
"id": "875ffb7e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we load the state-of-the-union text into Vectara. \n",
|
||||
"First we load the state-of-the-union text into Vectara.\n",
|
||||
"\n",
|
||||
"Note that we use the `from_files` interface which does not require any local processing or chunking - Vectara receives the file content and performs all the necessary pre-processing, chunking and embedding of the file into its knowledge store.\n",
|
||||
"Note that we use the add_files interface which does not require any local processing or chunking - Vectara receives the file content and performs all the necessary pre-processing, chunking and embedding of the file into its knowledge store.\n",
|
||||
"\n",
|
||||
"In this case it uses a `.txt` file but the same works for many other [file types](https://docs.vectara.com/docs/api-reference/indexing-apis/file-upload/file-upload-filetypes)."
|
||||
"In this case it uses a .txt file but the same works for many other [file types](https://docs.vectara.com/docs/api-reference/indexing-apis/file-upload/file-upload-filetypes)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"id": "be0a4973",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['state_of_the_union.txt']"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vectara = Vectara.from_files([\"state_of_the_union.txt\"])"
|
||||
"corpus_key = os.getenv(\"VECTARA_CORPUS_KEY\")\n",
|
||||
"file_obj = File(\n",
|
||||
" file_path=\"../document_loaders/example_data/state_of_the_union.txt\",\n",
|
||||
" metadata={\"source\": \"text_file\"},\n",
|
||||
")\n",
|
||||
"vectara.add_files([file_obj], corpus_key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -113,38 +146,52 @@
|
||||
"id": "22a6b953",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Vectara RAG (retrieval augmented generation)\n",
|
||||
"## Vectara RAG (retrieval augmented generation)\n",
|
||||
"\n",
|
||||
"We now create a `VectaraQueryConfig` object to control the retrieval and summarization options:\n",
|
||||
"* We enable summarization, specifying we would like the LLM to pick the top 7 matching chunks and respond in English\n",
|
||||
"* We enable MMR (max marginal relevance) in the retrieval process, with a 0.2 diversity bias factor\n",
|
||||
"* We want the top-10 results, with hybrid search configured with a value of 0.025\n",
|
||||
"\n",
|
||||
"Using this configuration, let's create a LangChain `Runnable` object that encpasulates the full Vectara RAG pipeline, using the `as_rag` method:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"id": "9ecda054-96a8-4a91-aeae-32006efb1ac8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Biden addressed various topics in his statements. He highlighted the need to confront Putin by building a coalition of nations[1]. He also expressed commitment to investigating the impact of burn pits on soldiers' health, including his son's case[2]. Additionally, Biden outlined a plan to fight inflation by cutting prescription drug costs[3]. He emphasized the importance of continuing to combat COVID-19 and not just accepting living with it[4]. Furthermore, he discussed measures to weaken Russia economically and target Russian oligarchs[6]. Biden also advocated for passing the Equality Act to support LGBTQ+ Americans and condemned state laws targeting transgender individuals[7].\""
|
||||
"\"President Biden discussed several key issues in his recent statements. He emphasized the importance of keeping schools open and noted that with a high vaccination rate and reduced hospitalizations, most Americans can safely return to normal activities without masks [1]. He addressed the need to hold social media platforms accountable for their impact on children and called for stronger privacy protections and mental health services [2]. Biden also announced measures against Russian oligarchs, including closing American airspace to Russian flights and targeting their assets, as part of efforts to weaken Russia's economy [3], [7]. Additionally, he reaffirmed the need to protect women's rights, particularly the right to choose as affirmed in Roe v. Wade [5].\""
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"summary_config = SummaryConfig(is_enabled=True, max_results=7, response_lang=\"eng\")\n",
|
||||
"rerank_config = RerankConfig(reranker=\"mmr\", rerank_k=50, mmr_diversity_bias=0.2)\n",
|
||||
"generation_config = GenerationConfig(\n",
|
||||
" max_used_search_results=7,\n",
|
||||
" response_language=\"eng\",\n",
|
||||
" generation_preset_name=\"vectara-summary-ext-24-05-med-omni\",\n",
|
||||
" enable_factual_consistency_score=True,\n",
|
||||
")\n",
|
||||
"search_config = SearchConfig(\n",
|
||||
" corpora=[CorpusConfig(corpus_key=corpus_key)],\n",
|
||||
" limit=25,\n",
|
||||
" reranker=ChainReranker(\n",
|
||||
" rerankers=[\n",
|
||||
" CustomerSpecificReranker(reranker_id=\"rnk_272725719\", limit=100),\n",
|
||||
" MmrReranker(diversity_bias=0.2, limit=100),\n",
|
||||
" ]\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"config = VectaraQueryConfig(\n",
|
||||
" k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config\n",
|
||||
" search=search_config,\n",
|
||||
" generation=generation_config,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"query_str = \"what did Biden say?\"\n",
|
||||
@@ -163,7 +210,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 7,
|
||||
"id": "27f01330-8917-4eff-b603-59ab2571a4d2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -171,7 +218,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Biden addressed various topics in his statements. He highlighted the importance of building coalitions to confront global challenges [1]. He also expressed commitment to investigating the impact of burn pits on soldiers' health, including his son's case [2, 4]. Additionally, Biden outlined his plan to combat inflation by cutting prescription drug costs and reducing the deficit, with support from Nobel laureates and business leaders [3]. He emphasized the ongoing fight against COVID-19 and the need to continue combating the virus [5]. Furthermore, Biden discussed measures taken to weaken Russia's economic and military strength, targeting Russian oligarchs and corrupt leaders [6]. He also advocated for passing the Equality Act to support LGBTQ+ Americans and address discriminatory state laws [7]."
|
||||
"President Biden discussed several key issues in his recent statements. He emphasized the importance of keeping schools open and noted that with a high vaccination rate and reduced hospitalizations, most Americans can safely return to normal activities without masks [1]. He addressed the need to hold social media platforms accountable for their impact on children and called for stronger privacy protections and mental health services [2]. Biden also announced measures against Russia, including preventing its central bank from defending the Ruble and targeting Russian oligarchs' assets, as part of efforts to weaken Russia's economy and military [3]. Additionally, he reaffirmed the commitment to protect women's rights, particularly the right to choose as affirmed in Roe v. Wade [5]. Lastly, he advocated for funding the police with necessary resources and training to ensure community safety [6]."
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -203,7 +250,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 8,
|
||||
"id": "b2e0aa2c-7c8e-4d79-8abc-66f5a1f961b3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -211,19 +258,12 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Biden addressed various topics in his statements. He highlighted the need to confront Putin by building a coalition of nations[1]. He also expressed his commitment to investigating the impact of burn pits on soldiers' health, referencing his son's experience[2]. Additionally, Biden discussed his plan to fight inflation by cutting prescription drug costs and garnering support from Nobel laureates and business leaders[4]. Furthermore, he emphasized the importance of continuing to combat COVID-19 and not merely accepting living with the virus[5]. Biden's remarks encompassed international relations, healthcare challenges faced by soldiers, economic strategies, and the ongoing battle against the pandemic.\n",
|
||||
"Vectara FCS = 0.41796625\n"
|
||||
"President Biden discussed several key topics in his recent statements. He emphasized the importance of keeping schools open and noted that with a high vaccination rate and reduced hospitalizations, most Americans can safely return to normal activities without masks [1]. He addressed the need to hold social media platforms accountable for their impact on children and called for stronger privacy protections and mental health services [2]. Biden also announced measures against Russian oligarchs, including closing American airspace to Russian flights and targeting their assets, as part of efforts to weaken Russia's economy [3], [7]. Additionally, he reaffirmed the need to protect women's rights, particularly the right to choose as affirmed in Roe v. Wade [5].\n",
|
||||
"Vectara FCS = 0.61621094\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"summary_config = SummaryConfig(is_enabled=True, max_results=5, response_lang=\"eng\")\n",
|
||||
"rerank_config = RerankConfig(reranker=\"mmr\", rerank_k=50, mmr_diversity_bias=0.1)\n",
|
||||
"config = VectaraQueryConfig(\n",
|
||||
" k=10, lambda_val=0.005, rerank_config=rerank_config, summary_config=summary_config\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"rag = vectara.as_rag(config)\n",
|
||||
"resp = rag.invoke(query_str)\n",
|
||||
"print(resp[\"answer\"])\n",
|
||||
"print(f\"Vectara FCS = {resp['fcs']}\")"
|
||||
@@ -243,26 +283,28 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 9,
|
||||
"id": "19cd2f86",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully. We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.', metadata={'lang': 'eng', 'section': '1', 'offset': '2160', 'len': '36', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.', metadata={'lang': 'eng', 'section': '1', 'offset': '34652', 'len': '60', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. Danielle says Heath was a fighter to the very end. He didn’t know how to stop fighting, and neither did she. Through her pain she found purpose to demand we do better. Tonight, Danielle—we are.', metadata={'lang': 'eng', 'section': '1', 'offset': '35442', 'len': '57', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'})]"
|
||||
"[Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='He rejected repeated efforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='Putin’s latest attack on Ukraine was premeditated and unprovoked. He rejected repeated efforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did.')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"config.summary_config.is_enabled = False\n",
|
||||
"config.k = 3\n",
|
||||
"config.generation = None\n",
|
||||
"config.search.limit = 5\n",
|
||||
"retriever = vectara.as_retriever(config=config)\n",
|
||||
"retriever.invoke(query_str)"
|
||||
]
|
||||
@@ -277,27 +319,34 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"id": "59268e9a-6089-4bb2-8c61-1ea6b956f83c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully. We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.', metadata={'lang': 'eng', 'section': '1', 'offset': '2160', 'len': '36', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.', metadata={'lang': 'eng', 'section': '1', 'offset': '34652', 'len': '60', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content='But cancer from prolonged exposure to burn pits ravaged Heath’s lungs and body. Danielle says Heath was a fighter to the very end. He didn’t know how to stop fighting, and neither did she. Through her pain she found purpose to demand we do better. Tonight, Danielle—we are.', metadata={'lang': 'eng', 'section': '1', 'offset': '35442', 'len': '57', 'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'vectara'}),\n",
|
||||
" Document(page_content=\"Biden discussed various topics in his statements. He highlighted the importance of unity and preparation to confront challenges, such as building coalitions to address global issues [1]. Additionally, he shared personal stories about the impact of health issues on soldiers, including his son's experience with brain cancer possibly linked to burn pits [2]. Biden also outlined his plans to combat inflation by cutting prescription drug costs and emphasized the ongoing efforts to combat COVID-19, rejecting the idea of merely living with the virus [4, 5]. Overall, Biden's messages revolved around unity, healthcare challenges faced by soldiers, economic plans, and the ongoing fight against COVID-19.\", metadata={'summary': True, 'fcs': 0.54751414})]"
|
||||
"[Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='We won’t be able to compete for the jobs of the 21st Century if we don’t fix that. That’s why it was so important to pass the Bipartisan Infrastructure Law—the most sweeping investment to rebuild America in history. This was a bipartisan effort, and I want to thank the members of both parties who worked to make it happen. We’re done talking about infrastructure weeks. We’re going to have an infrastructure decade.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains. And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='When they came home, many of the world’s fittest and best trained warriors were never the same. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. I know. \\n\\nOne of those soldiers was my son Major Beau Biden. We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. But I’m committed to finding out everything we can.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come. Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. We are joining with our European allies to find and seize your yachts your luxury apartments your private jets.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='He rejected repeated efforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did. We prepared extensively and carefully.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='It delivered immediate economic relief for tens of millions of Americans. Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. And as my Dad used to say, it gave people a little breathing room. And unlike the $2 Trillion tax cut passed in the previous administration that benefitted the top 1% of Americans, the American Rescue Plan helped working people—and left no one behind. Lots of jobs. \\n\\nIn fact—our economy created over 6.5 Million new jobs just last year, more jobs created in one year \\nthan ever before in the history of America.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='All told, we created 369,000 new manufacturing jobs in America just last year. Powered by people I’ve met like JoJo Burgess, from generations of union steelworkers from Pittsburgh, who’s here with us tonight. As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” It’s time. \\n\\nBut with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. Inflation is robbing them of the gains they might otherwise feel.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='Putin’s latest attack on Ukraine was premeditated and unprovoked. He rejected repeated efforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. We were ready. Here is what we did.'),\n",
|
||||
" Document(metadata={'X-TIKA:Parsed-By': 'org.apache.tika.parser.csv.TextAndCSVParser', 'Content-Encoding': 'UTF-8', 'X-TIKA:detectedEncoding': 'UTF-8', 'X-TIKA:encodingDetector': 'UniversalEncodingDetector', 'Content-Type': 'text/plain; charset=UTF-8', 'source': 'text_file', 'framework': 'langchain'}, page_content='Danielle says Heath was a fighter to the very end. He didn’t know how to stop fighting, and neither did she. Through her pain she found purpose to demand we do better. Tonight, Danielle—we are. The VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits.'),\n",
|
||||
" Document(metadata={'summary': True, 'fcs': (0.54785156,)}, page_content='President Biden spoke about several key issues. He emphasized the importance of the Bipartisan Infrastructure Law, calling it the most significant investment to rebuild America and highlighting it as a bipartisan effort [1]. He also announced measures against Russian oligarchs, including assembling a task force to seize their assets and closing American airspace to Russian flights, further isolating Russia economically [2]. Additionally, he expressed a commitment to investigating the health impacts of burn pits on military personnel, referencing his son, Major Beau Biden, who suffered from brain cancer [3].')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"config.summary_config.is_enabled = True\n",
|
||||
"config.k = 3\n",
|
||||
"config.generation = GenerationConfig()\n",
|
||||
"config.search.limit = 10\n",
|
||||
"retriever = vectara.as_retriever(config=config)\n",
|
||||
"retriever.invoke(query_str)"
|
||||
]
|
||||
@@ -316,17 +365,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 14,
|
||||
"id": "e14325b9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Biden's statement highlighted his efforts to unite freedom-loving nations against Putin's aggression, sharing information in advance to counter Russian lies and hold Putin accountable[1]. Additionally, he emphasized his commitment to military families, like Danielle Robinson, and outlined plans for more affordable housing, Pre-K for 3- and 4-year-olds, and ensuring no additional taxes for those earning less than $400,000 a year[2][3]. The statement also touched on the readiness of the West and NATO to respond to Putin's actions, showcasing extensive preparation and coalition-building efforts[4]. Heath Robinson's story, a combat medic who succumbed to cancer from burn pits, was used to illustrate the resilience and fight for better conditions[5].\""
|
||||
"'The remarks made by Biden include his emphasis on the importance of the Bipartisan Infrastructure Law, which he describes as the most significant investment to rebuild America in history. He highlights the bipartisan effort involved in passing this law and expresses gratitude to members of both parties for their collaboration. Biden also mentions the transition from \"infrastructure weeks\" to an \"infrastructure decade\" [1]. Additionally, he shares a personal story about his father having to leave their home in Scranton, Pennsylvania, to find work, which influenced his decision to fight for the American Rescue Plan to help those in need [2].'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -371,7 +420,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.8"
|
||||
"version": "3.12.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -90,16 +90,9 @@
|
||||
"export LANGSMITH_PROJECT=\"default\" # or any other project name\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Or, if in a notebook, you can set them with:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "599bb688",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"Or, if in a notebook, you can set them with:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
@@ -125,7 +118,8 @@
|
||||
"if \"OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\n",
|
||||
" prompt=\"Enter your OpenAI API key (required if using OpenAI): \"\n",
|
||||
" )"
|
||||
" )\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"\n",
|
||||
"In the reduce step, `MapReduceDocumentsChain` supports a recursive \"collapsing\" of the summaries: the inputs would be partitioned based on a token limit, and summaries would be generated of the partitions. This step would be repeated until the total length of the summaries was within a desired limit, allowing for the summarization of arbitrary-length text. This is particularly useful for models with smaller context windows.\n",
|
||||
"\n",
|
||||
"LangGraph suports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows, and confers a number of advantages for this problem:\n",
|
||||
"LangGraph supports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows, and confers a number of advantages for this problem:\n",
|
||||
"\n",
|
||||
"- LangGraph allows for individual steps (such as successive summarizations) to be streamed, allowing for greater control of execution;\n",
|
||||
"- LangGraph's [checkpointing](https://langchain-ai.github.io/langgraph/how-tos/persistence/) supports error recovery, extending with human-in-the-loop workflows, and easier incorporation into conversational applications.\n",
|
||||
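As a rough illustration of the recursive collapsing described above (a sketch only: `summarize` stands in for an LLM call and `num_tokens` for a tokenizer, both hypothetical):

```python
from typing import Callable, List


def collapse(
    summaries: List[str],
    summarize: Callable[[List[str]], str],
    num_tokens: Callable[[str], int],
    token_max: int = 1000,
) -> str:
    """Repeatedly partition summaries under a token budget and re-summarize
    each partition, until a single summary remains."""
    while len(summaries) > 1:
        # Partition the summaries so each group stays within the token budget.
        partitions: List[List[str]] = []
        current: List[str] = []
        current_len = 0
        for s in summaries:
            if current and current_len + num_tokens(s) > token_max:
                partitions.append(current)
                current, current_len = [], 0
            current.append(s)
            current_len += num_tokens(s)
        if current:
            partitions.append(current)
        # Summarize each partition; the loop repeats until one summary is left.
        summaries = [summarize(p) for p in partitions]
    return summaries[0]
```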
|
||||
@@ -169,6 +169,14 @@ DATABASE_TOOL_FEAT_TABLE = {
|
||||
},
|
||||
}
|
||||
|
||||
FINANCE_TOOL_FEAT_TABLE = {
|
||||
"GOAT": {
|
||||
"link": "/docs/integrations/tools/goat",
|
||||
"pricing": "Free",
|
||||
"capabilities": "Create and receive payments, purchase physical goods, make investments, and more.",
|
||||
},
|
||||
}
|
||||
|
||||
TOOLS_TEMPLATE = """\
|
||||
---
|
||||
sidebar_position: 0
|
||||
@@ -220,6 +228,12 @@ The following table shows tools that can be used to automate tasks in databases:
|
||||
|
||||
{database_table}
|
||||
|
||||
## Finance
|
||||
|
||||
The following table shows tools that can be used to execute financial transactions such as payments, purchases, and more:
|
||||
|
||||
{finance_table}
|
||||
|
||||
## All tools
|
||||
|
||||
import {{ IndexTable }} from "@theme/FeatureTables";
|
||||
@@ -290,6 +304,22 @@ def get_database_table() -> str:
|
||||
return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_finance_table() -> str:
|
||||
"""Get the table of finance tools."""
|
||||
header = ["tool", "pricing", "capabilities"]
|
||||
title = ["Tool/Toolkit", "Pricing", "Capabilities"]
|
||||
rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
|
||||
for finance_tool, feats in sorted(FINANCE_TOOL_FEAT_TABLE.items()):
|
||||
# Fields are in the order of the header
|
||||
row = [
|
||||
f"[{finance_tool}]({feats['link']})",
|
||||
]
|
||||
for h in header[1:]:
|
||||
row.append(feats.get(h))
|
||||
rows.append(row)
|
||||
return "\n".join(["|".join(row) for row in rows])
|
||||
|
||||
|
||||
def get_search_tools_table() -> str:
|
||||
"""Get the table of search tools."""
|
||||
header = ["tool", "pricing", "available_data"]
|
||||
@@ -355,6 +385,7 @@ if __name__ == "__main__":
|
||||
productivity_table=get_productivity_table(),
|
||||
webbrowsing_table=get_webbrowsing_table(),
|
||||
database_table=get_database_table(),
|
||||
finance_table=get_finance_table(),
|
||||
)
|
||||
with open(output_integrations_dir / "tools" / "index.mdx", "w") as f:
|
||||
f.write(tools_page)
|
||||
|
||||
436
docs/static/llms.txt
vendored
Normal file
@@ -0,0 +1,436 @@
|
||||
# LangChain
|
||||
|
||||
## High level
|
||||
|
||||
[Why LangChain?](https://python.langchain.com/docs/concepts/why_langchain/): considering using LangChain, when building complex AI applications, and when needing to evaluate AI applications. This page discusses the main reasons to use LangChain: standardized component interfaces, orchestration capabilities, and observability/evaluation through LangSmith.
|
||||
[Architecture](https://python.langchain.com/docs/concepts/architecture/): needing an overview of the LangChain architecture, exploring the various packages and components, or deciding which parts to use for a specific application. Provides a high-level overview of the different packages that make up the LangChain framework, including langchain-core, langchain, integration packages, langchain-community, langgraph, langserve, and LangSmith.
|
||||
|
||||
## Concepts
|
||||
|
||||
[Chat Models](https://python.langchain.com/docs/concepts/chat_models/): building applications using chat models, learning about chat model interfaces and features, or interested in integrating chat models with external tools and services. Provides an overview of chat models in LangChain, including their features, integration options, interfaces, tool calling, structured outputs, multimodality, context windows, and advanced topics like rate-limiting and caching.
|
||||
[Messages](https://python.langchain.com/docs/concepts/messages/): querying LangChain's chat message format, understanding different message types, building chat applications. Messages are the unit of communication in chat models, representing input/output with roles, content, metadata. Covers SystemMessage, HumanMessage, AIMessage, AIMessageChunk, ToolMessage, RemoveMessage, and legacy FunctionMessage.
|
||||
[Chat history](https://python.langchain.com/docs/concepts/chat_history/): dealing with chat history, managing chat context, or understanding conversation patterns. Covers chat history structure, conversation patterns between user/assistant/tools, and guidelines for managing chat history to stay within context window.
|
||||
[Tools](https://python.langchain.com/docs/concepts/tools/): needing an overview of tools in LangChain, wanting to create custom tools, or learning how to pass runtime values to tools. Tools are a way to encapsulate functions with schemas that can be passed to chat models supporting tool calling. The page covers the tool interface, creating tools using the @tool decorator, configuring tool schemas, tool artifacts, special type annotations like InjectedToolArg, and toolkits.
|
||||
[tool calling](https://python.langchain.com/docs/concepts/tool_calling/): needing to understand how to enable tool calling functionality, how to create tools from functions, how to bind tools to a model that supports tool calling. The page covers the key concepts of tool calling, including tool creation using decorators, tool binding to models, tool calling by models, and tool execution. It provides an overview, recommended usage, and best practices.
|
||||
[structured outputs](https://python.langchain.com/docs/concepts/structured_outputs/): needing to return output in a structured format, working with databases or APIs that require structured data, or building applications with structured responses. Covers structured output concepts like schema definition and methods like tool calling and JSON mode, as well as helper functions, to instruct models to produce structured outputs conforming to a given schema.
|
||||
[Memory](https://langchain-ai.github.io/langgraph/concepts/memory/): developing agents with memory capabilities, implementing memory management strategies, or learning about different types of memory for AI agents. Covers topics related to short-term and long-term memory for agents, techniques for managing conversation history and summarizing past conversations, different types of memory (semantic, episodic, procedural), and approaches for writing memories in the hot path or in the background.
|
||||
[Multimodality](https://python.langchain.com/docs/concepts/multimodality/): needing to understand multimodal capabilities, using chat models with multimodal inputs, or using multimodal retrieval/embeddings. Discusses ability of LangChain components like chat models, embedding models, and vector stores to handle multimodal data like text, images, audio, video. Covers current status and limitations around multimodal inputs and outputs for chat models.
|
||||
[invoke](https://python.langchain.com/docs/concepts/runnables/): learning how to use the Runnable interface, when working with custom Runnables, and when needing to configure Runnables at runtime. The page covers the Runnable interface, its methods for invocation, batching, streaming, inspecting schemas, and configuration. It explains RunnableConfig, custom Runnables, and configurable Runnables.
|
||||
[stream](https://python.langchain.com/docs/concepts/streaming/): building applications that use streaming, displaying partial results in real-time, or providing progress updates on pipelines or workflows. This page covers streaming in LangChain, including what can be streamed in LLM applications, the streaming APIs available, how to write custom data to the stream, and how LangChain automatically enables streaming for chat models in certain cases.
|
||||
[LCEL](https://python.langchain.com/docs/concepts/lcel/): needing an overview of the LangChain Expression Language (LCEL), deciding whether to use LCEL or not, and understanding how to compose chains using LCEL primitives. Provides an overview of the LCEL, a declarative approach to building chains from existing Runnables, covering its benefits, composition primitives like RunnableSequence and RunnableParallel, the composition syntax, automatic type coercion, and guidance on when to use LCEL versus alternatives like LangGraph.
|
||||
[Document Loaders](https://python.langchain.com/docs/concepts/document_loaders/): needing to load data from various sources like files, webpages, or databases, or when handling large datasets with lazy loading. Document loaders help load data from different sources into a standardized Document object format, with options for lazy loading of large datasets.
|
||||
[Retrieval](https://python.langchain.com/docs/concepts/retrieval/): building retrieval systems, understanding query analysis, integrating with databases. This page covers key concepts and techniques in retrieval systems, including query analysis (re-writing and construction), vector and lexical indexes, databases, and LangChain's unified retriever interface.
|
||||
[Text Splitters](https://python.langchain.com/docs/concepts/text_splitters/): working with long documents, handling limited model input sizes, or optimizing retrieval systems. This page discusses different strategies for splitting large texts into smaller chunks, including length-based, text structure-based, document structure-based, and semantic meaning-based approaches.
|
||||
[Embedding Models](https://python.langchain.com/docs/concepts/embedding_models/): LLM should read this page when: 1) Working with text embeddings for search/retrieval 2) Comparing text similarity using embedding vectors 3) Selecting or integrating text embedding models. It covers key concepts of embedding models: converting text to numerical vectors, measuring similarity between vectors, embedding models (historical context, interface, integrations), and common similarity metrics (cosine, Euclidean, dot product).
|
||||
[Vector stores](https://python.langchain.com/docs/concepts/vectorstores/): LLM should read this page when: 1) Building applications that need to index and retrieve information based on semantic similarity 2) Integrating vector databases into their application 3) Exploring advanced vector search and retrieval techniques. Vector stores are specialized data stores that enable indexing and retrieving information based on vector representations (embeddings) of data, allowing semantic similarity search over unstructured data like text, images, and audio. The page covers vector store integrations, the core interface, adding/deleting documents, basic and advanced similarity search techniques, and concepts like metadata filtering.
|
||||
[Retrievers](https://python.langchain.com/docs/concepts/retrievers/): building a retrieval system, integrating different retrieval sources, or linking retrieved information to source documents. This page outlines the retriever interface in LangChain, common types of retrievers such as vector stores and search APIs, and advanced retrieval patterns like ensembling and retaining source document information.
|
||||
[Retrieval Augmented Generation (RAG)](https://python.langchain.com/docs/concepts/rag/): developing applications that incorporate retrieval and generation, building question-answering systems with external data sources, or optimizing knowledge retrieval and integration into language models. Covers the concept of Retrieval Augmented Generation (RAG), which combines retrieval systems with language models to utilize external knowledge, access up-to-date information, leverage domain-specific expertise, reduce hallucination, and integrate knowledge cost-effectively.
|
||||
[Agents](https://python.langchain.com/docs/concepts/agents/): building AI agents or systems that take high-level tasks and perform a series of actions to accomplish them, transitioning from the legacy AgentExecutor to the newer and more flexible LangGraph system. Provides an overview of agents in LangChain, the legacy AgentExecutor concept, resources for using AgentExecutor, and guidance on migrating to the preferred LangGraph architecture for building customizable agents.
|
||||
[Prompt Templates](https://python.langchain.com/docs/concepts/prompt_templates/): creating prompts for language models, formatting chat messages, slotting messages into specific locations in a prompt. This page covers different types of prompt templates (string, chat, messages placeholder) for formatting prompts for language models and chat models.
|
||||
[Output Parsers](https://python.langchain.com/docs/concepts/output_parsers/): looking for ways to extract structured data from model outputs, parsing model outputs into different formats, or handling errors in parsing. Covers various LangChain output parsers like JSON, XML, CSV, Pandas DataFrame, along with capabilities like output fixing, retrying, and using user-defined formats.
|
||||
[Few-shot prompting](https://python.langchain.com/docs/concepts/few_shot_prompting/): needing to improve model performance, when deciding how to format few-shot examples, when selecting examples for few-shot prompting The page covers generating examples, number of examples, selecting examples, and formatting examples for few-shot prompting with language models.
|
||||
[Example Selectors](https://python.langchain.com/docs/concepts/example_selectors/): selecting examples for few-shot prompting, dynamically choosing examples for prompts, or understanding different example selection techniques. The page covers example selectors, which are classes responsible for selecting and formatting examples to include as part of prompts for improved performance with few-shot learning.
|
||||
[Async programming](https://python.langchain.com/docs/concepts/async/): building asynchronous applications with LangChain, working with async runnables, or handling async API calls. Explains LangChain's asynchronous APIs, delegation to sync methods, performance considerations, compatibility with asyncio, and usage in Jupyter notebooks.
|
||||
[Callbacks](https://python.langchain.com/docs/concepts/callbacks/): [needing to log, monitor, or stream events in an LLM application] [This page covers LangChain's callback system, which allows hooking into various stages of an LLM application for logging, monitoring, streaming, and other purposes. It explains the different callback events, callback handlers, and how to pass callbacks.]
|
||||
[Tracing](https://python.langchain.com/docs/concepts/tracing/): tracing the steps of a chain/agent for debugging, understanding the chain's flow, or inspecting intermediary outputs. Discusses the concept of tracing in LangChain, including that traces contain runs which are individual steps, and that tracing provides observability into chains/agents.
|
||||
[Evaluation](https://python.langchain.com/docs/concepts/evaluation/): evaluating the performance of LLM-powered applications, creating or curating datasets, defining metrics for evaluation This page covers the concept of evaluation in LangChain, including using LangSmith to create datasets, define metrics, track results over time, and run evaluations automatically.
|
||||
[Testing](https://python.langchain.com/docs/concepts/testing/): testing LangChain components, implementing unit tests, or setting up integration tests This page explains unit tests, integration tests, and standard tests in LangChain, including code examples
|
||||
|
||||
## How-to guides
### Installation

[How to: install LangChain packages](https://python.langchain.com/docs/how_to/installation/): installing LangChain packages, learning about the LangChain ecosystem packages, or installing specific ecosystem packages. This page explains how to install the main LangChain package, as well as the different ecosystem packages like langchain-core, langchain-community, langchain-openai, langchain-experimental, langgraph, langserve, langchain-cli, and the langsmith SDK.

[How to: use LangChain with different Pydantic versions](https://python.langchain.com/docs/how_to/pydantic_compatibility/): needing to use LangChain with different Pydantic versions, installing Pydantic 2 with LangChain, or avoiding the pydantic.v1 namespace with LangChain APIs. The page explains that LangChain 0.3 uses Pydantic 2 internally and advises users to install Pydantic 2 and avoid using the pydantic.v1 namespace with LangChain APIs.

[How to: return structured data from a model](https://python.langchain.com/docs/how_to/structured_output/): wanting to return structured data from a model, building applications that require structured outputs, or exploring techniques for parsing model outputs into objects or schemas. This page covers methods for obtaining structured outputs from language models, including using .with_structured_output(), prompting techniques with output parsers, and handling complex schemas with few-shot examples.
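A minimal sketch of the `.with_structured_output()` method that guide centers on, assuming a provider package and API key are configured; `gpt-4o-mini` is a stand-in model name:

```python
from pydantic import BaseModel, Field
from langchain.chat_models import init_chat_model

class Joke(BaseModel):
    """A joke with a setup and a punchline."""
    setup: str = Field(description="The premise of the joke")
    punchline: str = Field(description="The payoff of the joke")

llm = init_chat_model("gpt-4o-mini")          # stand-in model choice
structured_llm = llm.with_structured_output(Joke)

joke = structured_llm.invoke("Tell me a joke about parrots")  # -> a Joke instance
```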
[How to: use chat models to call tools](https://python.langchain.com/docs/how_to/tool_calling/): needing to call tools from chat models, wanting to use chat models to generate structured output, or doing extraction from text using chat models. Explains how to define tool schemas as Python functions, Pydantic/TypedDict classes, or LangChain Tools; bind them to chat models; retrieve tool calls from LLM responses; and optionally parse tool calls into structured objects.
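A minimal sketch of the tool-calling flow described above, assuming `llm` is any chat model that supports tool calling:

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

llm_with_tools = llm.bind_tools([multiply])   # 'llm' is any tool-calling chat model
ai_msg = llm_with_tools.invoke("What is 6 times 7?")
print(ai_msg.tool_calls)
# e.g. [{'name': 'multiply', 'args': {'a': 6, 'b': 7}, 'id': '...', 'type': 'tool_call'}]
```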
[How to: stream runnables](https://python.langchain.com/docs/how_to/streaming/): wanting to learn how to stream LLM responses, stream intermediate steps, and configure streaming events. This page covers how to use the `stream` and `astream` methods to stream final outputs, how to use `astream_events` to stream both final outputs and intermediate steps, filtering events, propagating callbacks for streaming, and working with input streams.
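A short sketch of the synchronous `stream` method, assuming `llm` is any chat model:

```python
# Token-by-token streaming of the final output.
for chunk in llm.stream("Write a haiku about autumn"):
    print(chunk.content, end="", flush=True)

# Inside an async context, `llm.astream(...)` and `llm.astream_events(...)`
# are the asynchronous equivalents, the latter also surfacing intermediate steps.
```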
[How to: debug your LLM apps](https://python.langchain.com/docs/how_to/debugging/): debugging LLM applications, adding print statements, or logging events for tracing. Covers setting verbose mode to print important events, debug mode to print all events, and using LangSmith for visualizing event traces.
### Components
These are the core building blocks you can use when building applications.
#### Chat models
[Chat Models](https://python.langchain.com/docs/concepts/chat_models/): building applications using chat models, learning about chat model interfaces and features, or integrating chat models with external tools and services. Provides an overview of chat models in LangChain, including their features, integration options, interfaces, tool calling, structured outputs, multimodality, context windows, and advanced topics like rate-limiting and caching.

[here](https://python.langchain.com/docs/integrations/chat/): integrating chat models into an application, using chat models for conversational AI tasks, or choosing between different chat model providers. Provides an overview of chat models integrated with LangChain, including OpenAI, Anthropic, Google, and others. Covers key features like tool calling, structured output, JSON mode, local usage, and multimodal support.

[How to: use chat models to call tools](https://python.langchain.com/docs/how_to/tool_calling/): needing to call tools from chat models, wanting to use chat models to generate structured output, or doing extraction from text using chat models. Explains how to define tool schemas as Python functions, Pydantic/TypedDict classes, or LangChain Tools; bind them to chat models; retrieve tool calls from LLM responses; and optionally parse tool calls into structured objects.

[How to: get models to return structured output](https://python.langchain.com/docs/how_to/structured_output/): wanting to obtain structured output from an LLM, needing to parse JSON/XML/YAML output from an LLM, or looking to use few-shot examples with structured outputs. This page covers using the `.with_structured_output()` method to obtain structured data from LLMs, prompting techniques to elicit structured outputs, and parsing structured outputs.

[How to: cache model responses](https://python.langchain.com/docs/how_to/chat_model_caching/): needing to cache ChatModel responses for efficiency, needing to reduce API calls for cost savings, or during development. This page covers how to use an in-memory cache or a SQLite database for caching ChatModel responses, which can improve performance and reduce costs.
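A minimal sketch of enabling the in-memory cache globally; the SQLite variant is shown as a comment and assumes `langchain-community` is installed:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# All subsequent identical model calls in this process are served from memory.
set_llm_cache(InMemoryCache())

# A SQLite-backed cache persists across processes:
# from langchain_community.cache import SQLiteCache
# set_llm_cache(SQLiteCache(database_path=".langchain.db"))
```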
[How to: get log probabilities](https://python.langchain.com/docs/how_to/logprobs/): seeking to get token-level log probabilities from OpenAI chat models, or needing to understand how log probabilities are represented in LangChain. Explains how to configure OpenAI chat models to return token log probabilities, and how these are included in the response metadata and streamed responses.

[How to: create a custom chat model class](https://python.langchain.com/docs/how_to/custom_chat_model/): creating a custom chat model class, integrating a new language model as a chat model, or implementing streaming for a chat model. This page explains how to create a custom chat model class by inheriting from BaseChatModel, and implementing methods like _generate and _stream. It covers handling inputs, messages, streaming, identifying parameters, and contributing custom chat models.

[How to: stream a response back](https://python.langchain.com/docs/how_to/chat_streaming/): needing to stream chat model responses token-by-token, or wanting examples of the astream() and astream_events() methods for chat models. This page explains how to stream chat model responses token-by-token using the astream() and astream_events() methods, and provides examples for synchronous and asynchronous streaming with chat models that support this feature.

[How to: track token usage](https://python.langchain.com/docs/how_to/chat_token_usage_tracking/): tracking token usage for chat models, determining costs of using chat models, or implementing token usage tracking in applications. Provides methods to track token usage from OpenAI and Anthropic chat models through AIMessage.usage_metadata, callbacks, and using LangSmith. Covers streaming token usage and aggregating usage across multiple calls.

[How to: track response metadata across providers](https://python.langchain.com/docs/how_to/response_metadata/): needing to access metadata from model responses, wanting to get information like token usage or log probabilities, or checking safety ratings. Explains how to access response metadata from various chat model providers like OpenAI, Anthropic, and Vertex AI. Shows code examples of retrieving metadata like token usage, log probabilities, and safety ratings.

[How to: use chat models to call tools](https://python.langchain.com/docs/how_to/tool_calling/): needing to call tools from chat models, wanting to use chat models to generate structured output, or doing extraction from text using chat models. Explains how to define tool schemas as Python functions, Pydantic/TypedDict classes, or LangChain Tools; bind them to chat models; retrieve tool calls from LLM responses; and optionally parse tool calls into structured objects.

[How to: stream tool calls](https://python.langchain.com/docs/how_to/tool_streaming/): wanting to stream tool calls, handle partial tool call data, or accumulate tool call chunks. This page explains how to stream tool calls, merge message chunks to accumulate tool call chunks, and parse tool calls from accumulated chunks, with code examples.

[How to: handle rate limits](https://python.langchain.com/docs/how_to/chat_model_rate_limiting/): handling rate limits from model providers, running many parallel queries to a model, or benchmarking a chat model. The page explains how to initialize and use an in-memory rate limiter with chat models to limit the number of requests made per unit time.
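A sketch of the in-memory rate limiter; attaching it to a model is shown as a comment, since the exact constructor depends on the provider package:

```python
from langchain_core.rate_limiters import InMemoryRateLimiter

rate_limiter = InMemoryRateLimiter(
    requests_per_second=0.1,     # at most one request every 10 seconds
    check_every_n_seconds=0.1,   # how often the limiter wakes up to check
    max_bucket_size=10,          # maximum burst size
)

# Most chat model constructors accept a `rate_limiter` argument, e.g.:
# llm = ChatAnthropic(model="claude-3-5-sonnet-latest", rate_limiter=rate_limiter)
```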
[How to: few shot prompt tool behavior](https://python.langchain.com/docs/how_to/tools_few_shot/): using few-shot examples to improve tool calling, or demonstrating how to incorporate example queries and responses into the prompt. The page explains how to create few-shot prompts including examples of tool usage, allowing the model to learn from these demonstrations to improve its ability to correctly call tools for math operations or other tasks.

[How to: bind model-specific formatted tools](https://python.langchain.com/docs/how_to/tools_model_specific/): binding model-specific tools, binding OpenAI tool schemas, or invoking model-specific tools. This page explains how to bind model-specific tool schemas directly to an LLM, with an example using the OpenAI tool schema format.

[How to: force models to call a tool](https://python.langchain.com/docs/how_to/tool_choice/): needing to force an LLM to call a specific tool, or needing to force an LLM to call at least one tool. This page shows how to use the tool_choice parameter to force an LLM to call a specific tool or to call at least one tool from a set of available tools.

[How to: work with local models](https://python.langchain.com/docs/how_to/local_llms/): running LLMs locally on a user's device, using open-source LLMs, or utilizing custom prompts with LLMs. Gives an overview of open-source LLMs and frameworks for running inference locally, instructions for setting up and using local LLMs (Ollama, llama.cpp, GPT4All, llamafile), guidance on formatting prompts for specific LLMs, and potential use cases for local LLMs.

[How to: init any model in one line](https://python.langchain.com/docs/how_to/chat_models_universal_init/): initializing chat models for different model providers, creating a configurable chat model, or inferring the model provider from the model name. The page explains how to initialize any LLM chat model integration in one line using the init_chat_model() helper, create a configurable chat model with default or custom parameters, and infer the model provider based on the model name.
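A sketch of `init_chat_model()`, assuming the relevant provider packages and API keys are available; the model names are illustrative:

```python
from langchain.chat_models import init_chat_model

# Provider is inferred from well-known model-name prefixes.
gpt = init_chat_model("gpt-4o-mini", temperature=0)
claude = init_chat_model("claude-3-5-sonnet-latest", temperature=0)

# With no model fixed, the concrete backend is chosen per call via `config`.
configurable = init_chat_model(temperature=0)
configurable.invoke(
    "What is your name?",
    config={"configurable": {"model": "gpt-4o-mini"}},
)
```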
#### Messages
[Messages](https://python.langchain.com/docs/concepts/messages/): querying LangChain's chat message format, understanding different message types, or building chat applications. Messages are the unit of communication in chat models, representing input/output with roles, content, and metadata. Covers SystemMessage, HumanMessage, AIMessage, AIMessageChunk, ToolMessage, RemoveMessage, and the legacy FunctionMessage.

[How to: manage large chat history](https://python.langchain.com/docs/how_to/trim_messages/): working with long chat histories, concerned about token limits for chat models, or implementing token management strategies. This page explains how to use the trim_messages utility to reduce the size of a chat message history to fit within token limits, covering trimming by token count or message count, and allowing customization of trimming strategies.
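A runnable sketch of `trim_messages`; `token_counter=len` counts whole messages so the example needs no tokenizer, which is a simplification of real token counting:

```python
from langchain_core.messages import (
    AIMessage, HumanMessage, SystemMessage, trim_messages,
)

history = [
    SystemMessage("You are a helpful assistant."),
    HumanMessage("Hi, I'm Bob."),
    AIMessage("Hello Bob! How can I help?"),
    HumanMessage("What's my name?"),
]

# Keep the most recent messages under a budget; in practice you would pass
# a model or a real token-counting function as `token_counter`.
trimmed = trim_messages(
    history,
    max_tokens=3,
    strategy="last",
    token_counter=len,
    include_system=True,
)
```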
[How to: filter messages](https://python.langchain.com/docs/how_to/filter_messages/): needing to filter messages by type, id, or name when working with message histories, or using chains/agents that pass message histories between components. Provides instructions and examples for filtering message lists (e.g. to only include human messages) using the filter_messages utility, including basic usage, chaining with models, and the API reference.

[How to: merge consecutive messages of the same type](https://python.langchain.com/docs/how_to/merge_message_runs/): needing to merge consecutive messages of the same type for a particular model, composing the merge_message_runs utility with other components in a chain, or invoking it imperatively. The page explains how to use the merge_message_runs utility to merge consecutive messages of the same type, provides examples of using it in chains or invoking it directly, and links to the API reference for more details.
#### Prompt templates
[Prompt Templates](https://python.langchain.com/docs/concepts/prompt_templates/): creating prompts for language models, formatting chat messages, or slotting messages into specific locations in a prompt. This page covers the different types of prompt templates (string, chat, messages placeholder) for formatting prompts for language models and chat models.
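A small sketch combining the template types mentioned above: string-style message tuples, a `MessagesPlaceholder` for slotting in a list of messages, and a chat template wrapping both:

```python
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a {tone} assistant."),
    MessagesPlaceholder("history"),   # a list of messages is slotted in here
    ("human", "{question}"),
])

messages = prompt.invoke({
    "tone": "terse",
    "history": [HumanMessage("Hi!"), AIMessage("Hello.")],
    "question": "What is a prompt template?",
}).to_messages()
```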
[How to: use few shot examples](https://python.langchain.com/docs/how_to/few_shot_examples/): creating few-shot prompts, using example selectors, or providing examples to large language models. This page explains how to use few-shot examples to provide context to language models, including creating formatters, constructing example sets, using example selectors like SemanticSimilarityExampleSelector, and creating FewShotPromptTemplates.
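A self-contained sketch of a `FewShotPromptTemplate` with fixed examples (the example data is invented for illustration):

```python
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate

example_prompt = PromptTemplate.from_template("Q: {question}\nA: {answer}")
examples = [
    {"question": "2 + 2?", "answer": "4"},
    {"question": "3 + 3?", "answer": "6"},
]

prompt = FewShotPromptTemplate(
    examples=examples,             # an example_selector could be used instead
    example_prompt=example_prompt,
    suffix="Q: {input}\nA:",
    input_variables=["input"],
)
print(prompt.format(input="5 + 5?"))
```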
[How to: use few shot examples in chat models](https://python.langchain.com/docs/how_to/few_shot_examples_chat/): wanting to provide few-shot examples to steer a chat model's output, or needing to dynamically select examples from a larger set based on semantic similarity to the input. This page covers how to provide few-shot examples to chat models using either fixed examples or examples dynamically selected from a vectorstore based on semantic similarity to the input.

[How to: partially format prompt templates](https://python.langchain.com/docs/how_to/prompts_partial/): needing to partially format prompt templates, wanting to pass partial strings to templates, or needing to pass functions returning strings to templates. Explains how to partially format prompt templates by passing in a subset of required values as strings or functions that return strings, to create a new template expecting only the remaining values.

[How to: compose prompts together](https://python.langchain.com/docs/how_to/prompts_composition/): needing to compose prompts from various prompt components, working with chat prompts, or using the PipelinePromptTemplate class. This page explains how to concatenate different prompt templates together to build larger prompts, covering both string prompts and chat prompts, as well as using the PipelinePromptTemplate to reuse prompt components.
#### Example selectors
[Example Selectors](https://python.langchain.com/docs/concepts/example_selectors/): selecting examples for few-shot prompting, dynamically choosing examples for prompts, or understanding different example selection techniques. The page covers example selectors, which are classes responsible for selecting and formatting examples to include as part of prompts for improved performance with few-shot learning.

[How to: use example selectors](https://python.langchain.com/docs/how_to/example_selectors/): needing to select example prompts for few-shot learning, having many examples to choose from, or creating a custom example selector. Explains how to use example selectors in LangChain to select which examples to include in a prompt, covering built-in selectors like similarity and providing a custom example selector.

[How to: select examples by length](https://python.langchain.com/docs/how_to/example_selectors_length_based/): selecting examples for few-shot prompting, handling long examples that may exceed the context window, or dynamically including the appropriate number of examples. This page explains how to use the LengthBasedExampleSelector to select examples based on their length, including fewer examples for longer inputs to avoid exceeding the context window.

[How to: select examples by semantic similarity](https://python.langchain.com/docs/how_to/example_selectors_similarity/): selecting relevant examples for few-shot prompting, building example-based systems, or finding relevant reference cases. This page covers how to select examples by similarity to the input using embedding-based semantic search over a vector store.

[How to: select examples by semantic ngram overlap](https://python.langchain.com/docs/how_to/example_selectors_ngram/): selecting relevant examples to include in few-shot prompts, determining relevancy through n-gram overlap scores, or customizing example selection thresholds. Explains how to use the NGramOverlapExampleSelector to select and order examples based on n-gram overlap with the input text, including setting thresholds and dynamically adding examples.

[How to: select examples by maximal marginal relevance](https://python.langchain.com/docs/how_to/example_selectors_mmr/): needing to select few-shot examples that optimize for both similarity to inputs and diversity from each other, or working with example-based prompting for few-shot learning. Demonstrates how to use the MaxMarginalRelevanceExampleSelector, which selects examples by maximizing relevance to inputs while also optimizing for diversity between selected examples, contrasting it with selecting by similarity alone.

[How to: select examples from LangSmith few-shot datasets](https://python.langchain.com/docs/how_to/example_selectors_langsmith/): learning how to use LangSmith datasets for few-shot example selection, dynamically creating few-shot prompts from LangSmith data, or integrating LangSmith with LangChain chains. The page covers setting up LangSmith, querying LangSmith datasets for similar examples, and using those examples in a LangChain chain to create dynamic few-shot prompts for chat models.
#### LLMs
[LLMs](https://python.langchain.com/docs/concepts/text_llms/): needing an overview of string-based language models, learning about legacy models in LangChain, or comparing string-based models to chat models. Covers LangChain's support for older language models that take strings as input and output, distinguishing them from newer chat models; advises using chat models where possible.

[How to: cache model responses](https://python.langchain.com/docs/how_to/llm_caching/): needing to cache responses to save money and time, or learning about caching in LangChain. LangChain provides an optional caching layer for LLMs to save money and time by reducing API calls for repeated requests. Examples show caching with InMemoryCache and SQLiteCache.

[How to: create a custom LLM class](https://python.langchain.com/docs/how_to/custom_llm/): creating a custom LLM class, wrapping your own LLM provider, or integrating with a new language model not yet supported by LangChain. This page explains how to create a custom LLM class by implementing the required _call and _llm_type methods, as well as optional methods like _identifying_params, _acall, _stream, and _astream. It provides an example implementation, demonstrates testing and integration with LangChain APIs, and offers guidance for contributing custom LLM integrations.

[How to: stream a response back](https://python.langchain.com/docs/how_to/streaming_llm/): needing to stream responses from an LLM, working with async streaming from LLMs, or streaming events from an LLM. This page shows how to stream responses token-by-token from LLMs using both sync and async methods, as well as how to stream events from LLMs asynchronously.

[How to: track token usage](https://python.langchain.com/docs/how_to/llm_token_usage_tracking/): tracking token usage for LLM calls, managing costs for an LLM application, or calculating costs based on token counts. The page covers how to track token usage using LangSmith, OpenAI callback handlers, and handling streaming contexts; it also summarizes limitations with legacy models for streaming.

[How to: work with local models](https://python.langchain.com/docs/how_to/local_llms/): running LLMs locally on a user's device, using open-source LLMs, or utilizing custom prompts with LLMs. Gives an overview of open-source LLMs and frameworks for running inference locally, instructions for setting up and using local LLMs (Ollama, llama.cpp, GPT4All, llamafile), guidance on formatting prompts for specific LLMs, and potential use cases for local LLMs.
#### Output parsers
[Output Parsers](https://python.langchain.com/docs/concepts/output_parsers/): looking for ways to extract structured data from model outputs, parsing model outputs into different formats, or handling errors in parsing. Covers various LangChain output parsers like JSON, XML, CSV, and Pandas DataFrame, along with capabilities like output fixing, retrying, and user-defined formats.

[How to: parse text from message objects](https://python.langchain.com/docs/how_to/output_parser_string/): needing to parse text from message objects, extracting text from chat model responses, or working with structured output formats. This page explains how to use the StrOutputParser to extract text from message objects, regardless of the underlying content format, such as text, multimodal data, or structured output.

[How to: use output parsers to parse an LLM response into structured format](https://python.langchain.com/docs/how_to/output_parser_structured/): needing to parse LLM output into structured data, streaming partially parsed structured outputs, or using LCEL with output parsers. Explains how to use output parsers like PydanticOutputParser to parse LLM text responses into structured formats like Python objects, and how to integrate them with prompts, models, and LCEL streaming.
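A minimal sketch of `PydanticOutputParser`, parsing a hand-written JSON string in place of real model output:

```python
from pydantic import BaseModel
from langchain_core.output_parsers import PydanticOutputParser

class Person(BaseModel):
    name: str
    age: int

parser = PydanticOutputParser(pydantic_object=Person)

# The format instructions go into the prompt so the model knows the schema.
print(parser.get_format_instructions())

# Parsing the model's raw text output yields a validated Person instance.
person = parser.parse('{"name": "Ada", "age": 36}')
```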
[How to: parse JSON output](https://python.langchain.com/docs/how_to/output_parser_json/): prompting a language model to return JSON output, parsing JSON output from a language model, or streaming partial JSON objects from a language model. This page explains how to use the JsonOutputParser to specify a desired JSON schema, prompt a language model to generate output conforming to that schema, and parse the model's response as JSON. It covers using JsonOutputParser with and without Pydantic, streaming partial JSON objects, and provides code examples.

[How to: parse XML output](https://python.langchain.com/docs/how_to/output_parser_xml/): needing to parse XML output from a model, adding XML formatting instructions to prompts, or streaming partial XML results. This page shows how to use the XMLOutputParser to parse model output in XML format, including adding XML formatting instructions to prompts and streaming partial XML output.

[How to: parse YAML output](https://python.langchain.com/docs/how_to/output_parser_yaml/): needing to generate YAML output conforming to a specific schema, incorporating YAML output into a larger prompt/chain, or parsing YAML output returned by an LLM. This page explains how to use the YamlOutputParser to parse YAML output from language models, allowing the output to conform to a predefined schema. It covers setting up the parser, constructing prompts with formatting instructions, and chaining the parser with a model.

[How to: retry when output parsing errors occur](https://python.langchain.com/docs/how_to/output_parser_retry/): attempting to parse and handle partial or erroneous LLM outputs, troubleshooting output parsing failures, or implementing retry logic for parsing. Explains how to use the RetryOutputParser to handle parsing errors by reprompting the LLM, and provides examples for using it with OpenAI models and chaining it with other runnables.

[How to: try to fix errors in output parsing](https://python.langchain.com/docs/how_to/output_parser_fixing/): needing to handle improperly formatted outputs, attempting to fix formatting issues using an LLM, or parsing outputs that do not conform to a predefined schema. Explains how to use the OutputFixingParser, which wraps another parser and attempts to fix formatting errors by consulting an LLM when the original parser fails.

[How to: write a custom output parser class](https://python.langchain.com/docs/how_to/output_parser_custom/): creating a custom output parser, implementing a custom parser by inheriting from base classes, or parsing raw model outputs. Covers how to create custom output parsers using runnable lambdas/generators (recommended) or by inheriting from base parser classes like BaseOutputParser and BaseGenerationOutputParser. Includes examples for simple and more complex parsing scenarios.
#### Document loaders
[Document Loaders](https://python.langchain.com/docs/concepts/document_loaders/): needing to load data from various sources like files, webpages, or databases, or when handling large datasets with lazy loading. Document loaders help load data from different sources into a standardized Document object format, with options for lazy loading of large datasets.

[How to: load PDF files](https://python.langchain.com/docs/how_to/document_loader_pdf/): needing to load PDF files into LangChain Document objects.

[How to: load web pages](https://python.langchain.com/docs/how_to/document_loader_web/): needing to load and process web pages for question answering or other applications, or needing guidance on using web page content with LangChain. The page covers how to load web pages into LangChain's Document format, including simple text extraction and advanced parsing of page structure. It demonstrates tools like WebBaseLoader and UnstructuredLoader, and shows how to perform operations like vector search over loaded web content.

[How to: load CSV data](https://python.langchain.com/docs/how_to/document_loader_csv/): loading CSV files into a sequence of documents, customizing CSV parsing and loading, or specifying a column to identify the document source. This page explains how to load CSV files into a sequence of Document objects using LangChain's CSVLoader, including customizing the parsing, specifying a source column, and loading from a string.
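A sketch of `CSVLoader`; `data.csv` and the `id` column are stand-ins for a real file:

```python
from langchain_community.document_loaders import CSVLoader

# `source_column` names the column whose value becomes each
# Document's source metadata.
loader = CSVLoader(file_path="data.csv", source_column="id")
docs = loader.load()            # one Document per row

for doc in loader.lazy_load():  # lazy variant for large files
    print(doc.metadata["source"])
```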
[How to: load data from a directory](https://python.langchain.com/docs/how_to/document_loader_directory/): loading documents from a file system, handling various file encodings, or using custom document loaders. Shows how to load files from directories using the DirectoryLoader, handle encoding errors, use multithreading, and customize the loader class.

[How to: load HTML data](https://python.langchain.com/docs/how_to/document_loader_html/): loading HTML documents, parsing HTML files with specialized tools, or extracting text from HTML. This page covers how to load HTML documents into LangChain Document objects using Unstructured and BeautifulSoup4, with code examples and API references provided.

[How to: load JSON data](https://python.langchain.com/docs/how_to/document_loader_json/): loading JSON or JSON Lines data into LangChain Documents, or extracting metadata from JSON data. This page explains how to use the JSONLoader to convert JSON and JSONL data into LangChain Documents, including how to extract specific fields into the content and metadata, and provides examples for common JSON structures.

[How to: load Markdown data](https://python.langchain.com/docs/how_to/document_loader_markdown/): needing to load Markdown files, retaining Markdown elements, or parsing Markdown into components. This page covers how to load Markdown files into LangChain documents, including retaining elements like titles and lists, and parsing Markdown into components.

[How to: load Microsoft Office data](https://python.langchain.com/docs/how_to/document_loader_office_file/): loading Microsoft Office files (DOCX, XLSX, PPTX) into LangChain, or working with Azure AI Document Intelligence. It covers how to use the AzureAIDocumentIntelligenceLoader to load Office documents into LangChain Documents for further processing.

[How to: write a custom document loader](https://python.langchain.com/docs/how_to/document_loader_custom/): creating a custom document loader, working with files, or using the GenericLoader abstraction. This page explains how to create a custom document loader, work with files using BaseBlobParser and Blob, and use the GenericLoader to combine a BlobLoader with a BaseBlobParser.
#### Text splitters
[Text Splitters](https://python.langchain.com/docs/concepts/text_splitters/): working with long documents, handling limited model input sizes, or optimizing retrieval systems. This page discusses different strategies for splitting large texts into smaller chunks, including length-based, text structure-based, document structure-based, and semantic meaning-based approaches.

[How to: recursively split text](https://python.langchain.com/docs/how_to/recursive_text_splitter/): splitting long text into smaller chunks, processing text from languages without word boundaries like Chinese or Japanese, or parsing documents for downstream tasks. Covers how to recursively split text by a list of characters like newlines and spaces, and options to customize characters for different languages. Discusses chunk size, overlap, and creating LangChain Document objects.
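A runnable sketch of the recursive splitter with illustrative sizes:

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

text = "LangChain splits long documents recursively. " * 20

splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,    # maximum characters per chunk
    chunk_overlap=20,  # characters shared between neighbouring chunks
)
chunks = splitter.split_text(text)          # -> list[str]
docs = splitter.create_documents([text])    # -> list[Document]
```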
[How to: split HTML](https://python.langchain.com/docs/how_to/split_html/): needing to split HTML content into chunks while preserving semantic structure for better context during processing. Explains different techniques to split HTML pages, like HTMLHeaderTextSplitter, HTMLSectionSplitter, and HTMLSemanticPreservingSplitter; covers preserving tables and lists, and custom handlers.

[How to: split by character](https://python.langchain.com/docs/how_to/character_text_splitter/): needing to split text by individual characters, controlling chunk size by character count, or handling text with differing chunk sizes. Explains how to split text into chunks by character count using the CharacterTextSplitter. Covers setting chunk size, overlap, and passing metadata.

[How to: split code](https://python.langchain.com/docs/how_to/code_splitter/): needing to split code into logical chunks, working with code from specific programming languages, or creating language-specific text splitters. Provides examples of using the RecursiveCharacterTextSplitter to split code from various programming languages like Python, JavaScript, Markdown, and others into document chunks based on language-specific separators.

[How to: split Markdown by headers](https://python.langchain.com/docs/how_to/markdown_header_metadata_splitter/): splitting markdown files into chunks, handling headers and metadata in markdown files, or constraining chunk sizes in markdown files. This page covers how to split markdown files by headers into chunks, handle metadata associated with headers, and constrain chunk sizes using other text splitters like RecursiveCharacterTextSplitter.

[How to: recursively split JSON](https://python.langchain.com/docs/how_to/recursive_json_splitter/): splitting JSON data into smaller chunks, or managing chunk sizes from list content within JSON data. Explains how to split JSON data into smaller chunks while keeping nested objects intact, control chunk sizes, and handle JSON lists by converting them to dictionaries before splitting.

[How to: split text into semantic chunks](https://python.langchain.com/docs/how_to/semantic-chunker/): splitting long text into chunks based on semantic meaning, breaking large documents into semantically coherent sections, or controlling the granularity of text splitting. This page explains how to use the SemanticChunker from LangChain to split text into semantically coherent chunks by leveraging embedding models, with options to control the splitting behavior based on percentile, standard deviation, interquartile range, or gradient of embedding distance.

[How to: split by tokens](https://python.langchain.com/docs/how_to/split_by_token/): splitting long text into chunks while counting tokens, handling non-English languages for text splitting, or comparing different tokenizers for text splitting. The page covers how to split text into chunks based on token count using different tokenizers like tiktoken, spaCy, SentenceTransformers, NLTK, KoNLPY (for Korean), and Hugging Face tokenizers. It explains the approaches, usage, and API references for each tokenizer.
#### Embedding models
[Embedding Models](https://python.langchain.com/docs/concepts/embedding_models/): working with text embeddings for search and retrieval, comparing text similarity using embedding vectors, or selecting and integrating text embedding models. Covers the key concepts of embedding models: converting text to numerical vectors, measuring similarity between vectors, the embedding model interface and integrations, and common similarity metrics (cosine, Euclidean, dot product).

[supported integrations](https://python.langchain.com/docs/integrations/text_embedding/): looking for integrations with embedding models, wanting to compare embedding providers, or needing guidance on selecting an embedding model. This page documents integrations with various model providers that allow using embeddings in LangChain, covering OpenAI, Azure, Google, AWS, HuggingFace, and other embedding services.

[How to: embed text data](https://python.langchain.com/docs/how_to/embed_text/): needing to embed text into vectors, using text embeddings for tasks like semantic search, or learning the interface for text embedding models. This page explains how to use LangChain's Embeddings class to interface with various text embedding model providers, embed documents and queries, and work with the resulting vector representations of text.

[How to: cache embedding results](https://python.langchain.com/docs/how_to/caching_embeddings/): caching document embeddings to improve performance, caching query embeddings to improve performance, or choosing a data store for caching embeddings. This page covers how to use the CacheBackedEmbeddings class to cache document and query embeddings in a ByteStore, demonstrating its usage with a local file store and an in-memory store. It also explains how to specify the cache namespace to avoid collisions.

[How to: create a custom embeddings class](https://python.langchain.com/docs/how_to/custom_embeddings/): needing to use a custom text embedding model, integrating a new text embedding provider, or contributing a new text embedding integration. The page covers implementing custom text embedding models for LangChain by following the Embeddings interface, providing examples, testing, and contributing guidelines.
#### Vector stores
[Vector stores](https://python.langchain.com/docs/concepts/vectorstores/): building applications that index and retrieve information based on semantic similarity, integrating vector databases, or exploring advanced vector search and retrieval techniques. Vector stores are specialized data stores that enable indexing and retrieving information based on vector representations (embeddings) of data, allowing semantic similarity search over unstructured data like text, images, and audio. The page covers vector store integrations, the core interface, adding and deleting documents, basic and advanced similarity search techniques, and concepts like metadata filtering.

[supported integrations](https://python.langchain.com/docs/integrations/vectorstores/): integrating vector stores into applications, deciding which vector store to use, or understanding the capabilities of different vector stores. This page provides an overview of vector stores, which are used to store embedded data and perform similarity search. It lists the different vector stores integrated with LangChain, along with their key features and capabilities.

[How to: use a vector store to retrieve data](https://python.langchain.com/docs/how_to/vectorstores/): building applications that search over large collections of text, indexing and retrieving information based on similarity between embeddings, or working with vector databases. The page covers how to create and query vector stores, which store embedded vectors of text and search for similar embeddings. It explains how to initialize different vector store options like Chroma, FAISS, and LanceDB, how to perform similarity searches on them, and touches on asynchronous operations with vector stores.
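A sketch of the core vector store interface using the in-memory implementation; `OpenAIEmbeddings` is a stand-in for any `Embeddings` implementation and requires its own package and API key:

```python
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings  # any Embeddings implementation works

store = InMemoryVectorStore(embedding=OpenAIEmbeddings())
store.add_texts([
    "LangChain vector stores index embedded text.",
    "Cats purr when they are content.",
])
results = store.similarity_search("How is text indexed?", k=1)
```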
#### Retrievers
[Retrievers](https://python.langchain.com/docs/concepts/retrievers/): building a retrieval system, integrating different retrieval sources, or linking retrieved information to source documents. This page outlines the retriever interface in LangChain, common types of retrievers such as vector stores and search APIs, and advanced retrieval patterns like ensembling and retaining source document information.

[How to: use a vector store to retrieve data](https://python.langchain.com/docs/how_to/vectorstore_retriever/): using vector stores for retrieval, implementing maximum marginal relevance retrieval, or specifying additional search parameters. This page explains how to create a retriever from a vector store, how to use maximum marginal relevance retrieval, and how to pass parameters like similarity score thresholds and top-k results.
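A sketch of turning a vector store into a retriever, continuing from the `store` built in the previous sketch; the MMR option assumes the underlying store implements maximal-marginal-relevance search:

```python
# 'store' is any vector store, e.g. the InMemoryVectorStore above.
retriever = store.as_retriever(
    search_type="mmr",        # maximal marginal relevance instead of pure similarity
    search_kwargs={"k": 3},   # return the top 3 documents
)
docs = retriever.invoke("How is text indexed?")
```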
[How to: generate multiple queries to retrieve data for](https://python.langchain.com/docs/how_to/MultiQueryRetriever/): improving retrieval results for search queries, retrieving documents from a vector database, or using an LLM to generate multiple queries for a given input. Explains how to use MultiQueryRetriever to automatically generate multiple queries from an input question using an LLM, retrieve documents for each query, and take the unique union of results to improve retrieval performance.

[How to: use contextual compression to compress the data retrieved](https://python.langchain.com/docs/how_to/contextual_compression/): needing to retrieve relevant information from a large corpus of documents, filtering out irrelevant content from retrieved documents, or compressing documents to focus on query-relevant content. This page discusses contextual compression, a technique that allows retrieving only relevant portions of documents given a query, using various methods like LLM-based extractors/filters, embedding similarity filters, or combinations thereof via pipelines.

[How to: write a custom retriever class](https://python.langchain.com/docs/how_to/custom_retriever/): learning how to create a custom retriever, implementing custom retrieval logic, or adding retrieval capabilities to an application. Explains how to implement a custom Retriever class by extending BaseRetriever, including examples and guidelines for contributing custom retrievers.

[How to: add similarity scores to retriever results](https://python.langchain.com/docs/how_to/add_scores_retriever/): needing to incorporate similarity/relevance scores from retrievers, using vector or multi-vector retrievers, or propagating scores through custom retriever subclasses. Shows how to add similarity scores from retrievers like vector store retrievers, SelfQueryRetriever, and MultiVectorRetriever to the metadata of retrieved documents.

[How to: combine the results from multiple retrievers](https://python.langchain.com/docs/how_to/ensemble_retriever/): combining results from multiple retriever algorithms, leveraging different retrieval strengths, or using a hybrid search approach. The page explains how to use the EnsembleRetriever to combine results from sparse and dense retrievers, outlines basic usage, and demonstrates runtime configuration of individual retrievers.

[How to: reorder retrieved results to mitigate the "lost in the middle" effect](https://python.langchain.com/docs/how_to/long_context_reorder/): looking to improve performance of RAG applications, mitigating the "lost in the middle" effect, or reordering retrieved results for longer contexts. Explains how to reorder retrieved documents to position the most relevant at the beginning and end, with less relevant in the middle, helping surface important information for language models.

[How to: generate multiple embeddings per document](https://python.langchain.com/docs/how_to/multi_vector/): needing to retrieve documents using multiple vector embeddings per document, working with long documents that need to be split into chunks, or using document summaries for retrieval. This page covers how to index documents using 1) document chunks, 2) summaries generated with an LLM, and 3) hypothetical questions generated with an LLM. It demonstrates the usage of the MultiVectorRetriever to retrieve parent documents based on vector embeddings of chunks, summaries, or questions.

[How to: retrieve the whole document for a chunk](https://python.langchain.com/docs/how_to/parent_document_retriever/): wanting to retrieve larger documents instead of just smaller chunks for context, or trying to balance keeping context while splitting long documents. The page explains how to use the ParentDocumentRetriever, which first splits documents into small chunks for indexing but then retrieves the larger parent documents those chunks came from during retrieval. It shows code examples for retrieving full documents as well as larger chunks rather than full documents.

[How to: generate metadata filters](https://python.langchain.com/docs/how_to/self_query/): needing to perform retrieval on documents based on semantic similarity to the query text and metadata filters, or integrating the retrieval into a question-answering pipeline. Covers creating a Self Query Retriever, which can perform semantic text retrieval and structured metadata filtering in one step, using an underlying vector store and a query constructor LLM chain to parse natural language queries into structured representations.

[How to: create a time-weighted retriever](https://python.langchain.com/docs/how_to/time_weighted_vectorstore/): needing to retrieve documents considering both semantic similarity and time decay, simulating time for testing purposes, or adjusting the balance between similarity and recency. This page explains how to use the TimeWeightedVectorStoreRetriever, which combines semantic similarity scores from a vector store with a time decay factor that reduces the relevance of older documents over time, and provides examples of using different decay rates and mocking time for testing.

[How to: use hybrid vector and keyword retrieval](https://python.langchain.com/docs/how_to/hybrid/): needing to perform hybrid search combining vector and other search techniques, or using a vectorstore that supports hybrid search capabilities. Explains how to configure and invoke LangChain chains to leverage the hybrid search features of vectorstores like Astra DB and ElasticSearch.
#### Indexing
Indexing is the process of keeping your vectorstore in sync with the underlying data source.

[How to: reindex data to keep your vectorstore in-sync with the underlying data source](https://python.langchain.com/docs/how_to/indexing/): needing to index documents into a vector store, handling content deduplication and document mutations over time, or cleaning up old/deleted documents from the store. Covers the LangChain indexing API workflow, including deletion modes, using document loaders, and setting source metadata for documents to handle mutations and deletions properly.
#### Tools
[Tools](https://python.langchain.com/docs/concepts/tools/): needing an overview of tools in LangChain, wanting to create custom tools, or learning how to pass runtime values to tools. Tools are a way to encapsulate functions with schemas that can be passed to chat models supporting tool calling. The page covers the tool interface, creating tools using the @tool decorator, configuring tool schemas, tool artifacts, special type annotations like InjectedToolArg, and toolkits.

[How to: define a custom tool](https://python.langchain.com/docs/how_to/custom_tools/): creating custom tools for agents, converting functions or runnables to tools, or subclassing BaseTool. This page covers creating tools from functions using the @tool decorator or StructuredTool class, creating tools from Runnables, subclassing BaseTool for custom tools, creating async tools, handling tool errors, and returning artifacts from tool execution.

[How to: use built-in tools and toolkits](https://python.langchain.com/docs/how_to/tools_builtin/): needing to use built-in LangChain tools or toolkits, or needing to customize built-in LangChain tools. This page covers how to use LangChain's built-in tools and toolkits, including customizing tool names, descriptions, and argument schemas. It also explains how to use LangChain toolkits, which are collections of tools for specific tasks.

[How to: use chat models to call tools](https://python.langchain.com/docs/how_to/tool_calling/): needing to call tools from chat models, wanting to use chat models to generate structured output, or doing extraction from text using chat models. Explains how to define tool schemas as Python functions, Pydantic/TypedDict classes, or LangChain Tools; bind them to chat models; retrieve tool calls from LLM responses; and optionally parse tool calls into structured objects.

[How to: pass tool outputs to chat models](https://python.langchain.com/docs/how_to/tool_results_pass_to_model/): integrating tools with chat models, implementing tool calling functionality, or passing tool outputs back to chat models. Demonstrates how to pass tool function outputs back to chat models as tool messages, allowing the model to incorporate tool results in generating a final response.
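A sketch of the round trip, reusing the `multiply` tool and `llm_with_tools` names from the tool-calling sketch earlier in this document; invoking a tool with a tool-call dict yields a `ToolMessage`:

```python
from langchain_core.messages import HumanMessage

messages = [HumanMessage("What is 6 times 7?")]
ai_msg = llm_with_tools.invoke(messages)
messages.append(ai_msg)

for tool_call in ai_msg.tool_calls:
    # Invoking a tool with a ToolCall dict returns a ToolMessage
    # carrying the matching tool_call_id.
    messages.append(multiply.invoke(tool_call))

final_answer = llm_with_tools.invoke(messages)  # the model now sees the tool results
```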
[How to: pass run time values to tools](https://python.langchain.com/docs/how_to/tool_runtime/): needing to pass runtime values to tools, preventing an LLM from generating certain tool arguments, or injecting arguments directly at runtime. This page explains how to use the InjectedToolArg annotation to mark certain parameters of a Tool as being injected at runtime, preventing the LLM from generating those arguments. It also shows how to inject the arguments at runtime and create a tool-executing chain.

[How to: add a human-in-the-loop for tools](https://python.langchain.com/docs/how_to/tools_human/): adding human approval to tool calling, allowing human intervention in a workflow, or setting up fail-safes for sensitive operations. This page demonstrates how to add a human-in-the-loop step to approve or reject tool calls made by an LLM in a tool-calling chain using LangChain.

[How to: handle tool errors](https://python.langchain.com/docs/how_to/tools_error/): needing to handle errors that occur when tools are called by an LLM, building fault tolerance into tool-calling chains, or enabling self-correction for tool calling errors. The page covers strategies like try/except for tool calls, fallbacks to different models, retrying with exceptions passed to the LLM, and creating custom tool exceptions.

[How to: force models to call a tool](https://python.langchain.com/docs/how_to/tool_choice/): needing to force an LLM to call a specific tool, or needing to force an LLM to call at least one tool. This page shows how to use the tool_choice parameter to force an LLM to call a specific tool or to call at least one tool from a set of available tools.

[How to: disable parallel tool calling](https://python.langchain.com/docs/how_to/tool_calling_parallel/): considering disabling parallel tool calling, looking for examples of parallel vs. single tool calls, or trying to control the number of tool calls made. Explains how to disable parallel tool calling in LangChain so that only one tool is called at a time, providing code examples.

[How to: access the `RunnableConfig` from a tool](https://python.langchain.com/docs/how_to/tool_configure/): accessing or configuring the runtime behavior of sub-runnables from a custom tool, or streaming events from child runnables within a tool. This page explains how to access the RunnableConfig from within a custom tool to configure sub-invocations and stream events from those sub-invocations.

[How to: stream events from a tool](https://python.langchain.com/docs/how_to/tool_stream_events/): needing to stream events from a tool, configuring tools to access internal runnables, or propagating configurations to child runnables in async environments. A guide on how to stream events from tools that call chat models, retrievers, or other runnables, by accessing internal events and propagating configurations, with examples and explanations of compatibility across Python versions.

[How to: return artifacts from a tool](https://python.langchain.com/docs/how_to/tool_artifacts/): returning structured data from a tool, passing artifacts to downstream components, or handling custom data types from tools. This page explains how tools can return artifacts separate from model input, allowing custom objects, dataframes, or images to be passed to downstream components while limiting model exposure.

[How to: convert Runnables to tools](https://python.langchain.com/docs/how_to/convert_runnable_to_tool/): needing to convert a Python function or Runnable into a LangChain tool, building an agent that calls external tools, or integrating a custom tool into a chat model. Demonstrates how to use the Runnable.as_tool() method to convert a Runnable to a tool with a name, description, and arguments schema. Includes examples of agents calling tools created from Runnables.

[How to: add ad-hoc tool calling capability to models](https://python.langchain.com/docs/how_to/tools_prompting/): adding ad-hoc tool calling capability to chat models/LLMs, using models not fine-tuned for tool calling, or invoking custom tools from LLMs. This guide demonstrates how to create prompts that instruct LLMs to request tool invocations, parse the LLM output to extract the tool and arguments, invoke the requested tool, and return the tool output.

[How to: pass runtime secrets to a runnable](https://python.langchain.com/docs/how_to/runnable_runtime_secrets/): needing to pass sensitive data to a runnable, ensuring secrets remain hidden from tracing, or integrating secret values with runnables. Explains how to pass runtime secrets to runnables using RunnableConfig, allowing certain keys to be hidden from tracing while still being accessible during invocation.
#### Multimodal
[How to: pass multimodal data directly to models](https://python.langchain.com/docs/how_to/multimodal_inputs/): needing to pass multimodal data (images, videos, etc.) to models, working with models that support multimodal input and tool calling, or learning how to encode and pass different types of multimodal data. This page demonstrates how to pass multimodal input like images directly to LLMs and chat models, covering encoding techniques, passing single or multiple images, and invoking models with image or multimodal content. It also shows how to use multimodal models for tool calling.

[How to: use multimodal prompts](https://python.langchain.com/docs/how_to/multimodal_prompts/): wanting to pass multimodal data like images to an LLM, sending multiple pieces of multimodal data to an LLM, or needing instructions on how to format multimodal prompts. Shows how to use prompt templates to format multimodal inputs like images for models that support them, including sending multiple images and comparing images.
#### Agents
:::note
[LangGraph](https://langchain-ai.github.io/langgraph/): learning about LangGraph, considering using LangGraph for an AI application, or deciding between LangGraph and alternatives. Overview of LangGraph as an open-source framework for building AI agents, its key features like reliability and customizability, its ecosystem integration with other LangChain products, and additional learning resources.
:::
[How to: use legacy LangChain Agents (AgentExecutor)](https://python.langchain.com/docs/how_to/agent_executor/): building agents with specific tools, when working with chat history, when using language models for tool calling. This page explains how to build agents with AgentExecutor that can call tools like search engines and retrievers, how to add chat history to agents, and how to use language models to determine which tools to call.
[How to: migrate from legacy LangChain agents to LangGraph](https://python.langchain.com/docs/how_to/migrate_agent/): migrating from legacy LangChain agents to LangGraph, or comparing the functionality of LangChain and LangGraph agents. This page provides a detailed guide on migrating from legacy LangChain agents to LangGraph agents, covering topics such as basic usage, prompt templates, memory handling, iterating through steps, dealing with intermediate steps, setting iteration and execution time limits, early stopping methods, and trimming intermediate steps.
#### Callbacks
[Callbacks](https://python.langchain.com/docs/concepts/callbacks/): needing to log, monitor, or stream events in an LLM application. This page covers LangChain's callback system, which allows hooking into various stages of an LLM application for logging, monitoring, streaming, and other purposes. It explains the different callback events, callback handlers, and how to pass callbacks.
[How to: pass in callbacks at runtime](https://python.langchain.com/docs/how_to/callbacks_runtime/): needing to pass callback handlers at runtime to capture events, needing to attach handlers to nested objects. This page explains how to pass callback handlers at runtime when invoking a runnable, which allows capturing events from all nested objects without manually attaching handlers.
[How to: attach callbacks to a module](https://python.langchain.com/docs/how_to/callbacks_attach/): attaching callbacks to a runnable, reusing callbacks across multiple executions, composing a chain of runnables. This page explains how to attach callbacks to a runnable using the .with_config() method, allowing callbacks to be reused across multiple executions and propagated to child components in a chain of runnables.
[How to: pass callbacks into a module constructor](https://python.langchain.com/docs/how_to/callbacks_constructor/): implementing callbacks in LangChain, understanding the scope of constructor callbacks, or deciding whether to use constructor or runtime callbacks. This page explains how to pass callbacks into the constructor of LangChain objects, and that constructor callbacks are scoped only to the object they are defined on, not inherited by child objects.
[How to: create custom callback handlers](https://python.langchain.com/docs/how_to/custom_callbacks/): creating custom behavior for LangChain components, customizing callback events, implementing event handlers. This page explains how to create custom callback handlers by implementing callback methods and attaching the handler to LangChain components.
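
A minimal sketch of such a handler; `on_llm_new_token` only fires for models invoked with streaming enabled:

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler

class TokenPrinter(BaseCallbackHandler):
    """Print each token as the model emits it."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        print(token, end="", flush=True)

# Attach at runtime; events from nested runnables are captured too:
# chain.invoke(inputs, config={"callbacks": [TokenPrinter()]})
```
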
[How to: use callbacks in async environments](https://python.langchain.com/docs/how_to/callbacks_async/): needing to use callbacks in async environments, handling sync callbacks in async methods, using AsyncCallbackHandler. Covers using callbacks with async APIs, avoiding blocking with AsyncCallbackHandler, propagating callbacks in async runnables, and an example of sync and async callback handlers.
[How to: dispatch custom callback events](https://python.langchain.com/docs/how_to/callbacks_custom_events/): dispatching custom callback events, handling async or sync custom callback events, or consuming custom events via the astream events API. This page covers how to dispatch custom callback events from within a Runnable, consume these events via async/sync callback handlers, and access custom events through the astream events API.
#### Custom
All LangChain components can easily be extended to support your own versions.
[How to: create a custom chat model class](https://python.langchain.com/docs/how_to/custom_chat_model/): creating a custom chat model class, integrating a new language model as a chat model, or implementing streaming for a chat model. This page explains how to create a custom chat model class by inheriting from BaseChatModel, and implementing methods like _generate and _stream. It covers handling inputs, messages, streaming, identifying parameters, and contributing custom chat models.
[How to: create a custom LLM class](https://python.langchain.com/docs/how_to/custom_llm/): creating a custom LLM class, wrapping your own LLM provider, integrating with a new language model not yet supported by LangChain. This page explains how to create a custom LLM class by implementing the required _call and _llm_type methods, as well as optional methods like _identifying_params, _acall, _stream, and _astream. It provides an example implementation, demonstrates testing and integration with LangChain APIs, and offers guidance for contributing custom LLM integrations.
[How to: create a custom embeddings class](https://python.langchain.com/docs/how_to/custom_embeddings/): needing to use a custom text embedding model, integrating a new text embedding provider, or contributing a new text embedding integration. The page covers implementing custom text embedding models for LangChain by following the Embeddings interface, providing examples, testing, and contributing guidelines.
[How to: write a custom retriever class](https://python.langchain.com/docs/how_to/custom_retriever/): learning how to create a custom retriever, when implementing custom retrieval logic, when adding retrieval capabilities to an application. Explains how to implement a custom Retriever class by extending BaseRetriever, including providing examples and guidelines for contributing custom retrievers.
[How to: write a custom document loader](https://python.langchain.com/docs/how_to/document_loader_custom/): creating a custom document loader, working with files, or using the GenericLoader abstraction. This page explains how to create a custom document loader, work with files using BaseBlobParser and Blob, and use the GenericLoader to combine a BlobLoader with a BaseBlobParser.
[How to: write a custom output parser class](https://python.langchain.com/docs/how_to/output_parser_custom/): creating a custom output parser, implementing a custom parser by inheriting from base classes, or parsing raw model outputs. Covers how to create custom output parsers using runnable lambdas/generators (recommended) or by inheriting from base parser classes like BaseOutputParser and BaseGenerationOutputParser. Includes examples for simple and more complex parsing scenarios.
[How to: create custom callback handlers](https://python.langchain.com/docs/how_to/custom_callbacks/): creating custom behavior for LangChain components, customizing callback events, implementing event handlers. This page explains how to create custom callback handlers by implementing callback methods and attaching the handler to LangChain components.
[How to: define a custom tool](https://python.langchain.com/docs/how_to/custom_tools/): creating custom tools for agents, converting functions or runnables to tools, or subclassing BaseTool. This page covers creating tools from functions using the @tool decorator or StructuredTool class, creating tools from Runnables, subclassing BaseTool for custom tools, creating async tools, handling tool errors, and returning artifacts from tool execution.
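
The quickest path is the `@tool` decorator, which infers the tool's name, description, and arguments schema from the function itself; a small sketch:

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""  # the docstring becomes the tool description
    return a * b

print(multiply.name)  # multiply
print(multiply.args)  # arguments schema inferred from the type hints
print(multiply.invoke({"a": 3, "b": 4}))  # 12
```
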
[How to: dispatch custom callback events](https://python.langchain.com/docs/how_to/callbacks_custom_events/): dispatching custom callback events, handling async or sync custom callback events, or consuming custom events via the astream events API. This page covers how to dispatch custom callback events from within a Runnable, consume these events via async/sync callback handlers, and access custom events through the astream events API.
#### Serialization
[How to: save and load LangChain objects](https://python.langchain.com/docs/how_to/serialization/): needing to save and reload LangChain objects, handle API keys securely when serializing/deserializing objects, and maintain compatibility when deserializing objects across different versions of LangChain. This page discusses how to save and load serializable LangChain objects like chains, messages, and documents using the dump/load functions, which separate API keys and ensure cross-version compatibility. Examples are provided for serializing/deserializing to JSON strings, Python dicts, and disk files.
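
A minimal round-trip sketch using `dumps`/`loads` from `langchain_core.load` (the prompt is illustrative; for objects that carry secrets, `loads` also accepts a secrets map):

```python
from langchain_core.load import dumps, loads
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")

serialized = dumps(prompt, pretty=True)  # JSON string; secrets are kept out
restored = loads(serialized)             # reconstruct an equivalent object

assert restored == prompt
```
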
## Use cases
These guides cover use-case specific details.
### Q&A with RAG
Retrieval Augmented Generation (RAG) is a way to connect LLMs to external sources of data.
[this guide](https://python.langchain.com/docs/tutorials/rag/): building a retrieval-augmented question-answering system, when needing to index and search through unstructured data sources, when learning about key concepts like document loaders, text splitters, vector stores, and retrievers. This tutorial covers how to build a Q&A application over textual data by loading documents, splitting them into chunks, embedding and storing the chunks in a vector store, retrieving relevant chunks for a user query, and generating an answer using a language model with the retrieved context.
[How to: add chat history](https://python.langchain.com/docs/how_to/qa_chat_history_how_to/): building a conversational question-answering application, incorporating chat history and retrieval from external knowledge sources, and deciding between using chains or agents for the application logic. Discusses building chat applications with LangChain by using chains for predictable retrieval steps or agents for more dynamic reasoning. Covers setting up components like embeddings and vector stores, constructing chains with tool calls for retrieval, and assembling LangGraph agents with a ReAct executor. Provides examples for testing the applications.
[How to: stream](https://python.langchain.com/docs/how_to/qa_streaming/): building a RAG (Retrieval Augmented Generation) application that requires streaming final outputs or intermediate steps, or integrating streaming capabilities into an existing LLM-based application. The page provides guidance on how to stream final outputs and intermediate steps from a RAG application built with LangChain and LangGraph. It covers setting up the necessary components, constructing the RAG application, and utilizing different streaming modes to stream tokens from the final output or individual state updates from each step.
[How to: return sources](https://python.langchain.com/docs/how_to/qa_sources/): building a question-answering (QA) application that needs to return the sources used to generate the answer, implementing a conversational QA system with retrieval-augmented generation (RAG), or structuring model outputs to include sources or citations. This guide explains how to configure LangChain's QA and RAG workflows to retrieve and return the source documents or citations used to generate the final answer. It covers both basic RAG and conversational RAG architectures, and demonstrates techniques for structuring the model output to include source information.
[How to: return citations](https://python.langchain.com/docs/how_to/qa_citations/): seeking to add citations to results from a Retrieval Augmented Generation (RAG) application, when wanting to justify an answer using source material, and when needing to provide evidence for generated outputs. The page covers various methods for getting a RAG application to cite sources used in generating answers, including tool-calling to return source IDs or text snippets, direct prompting to generate structured outputs with citations, retrieving and compressing context to minimize need for citations, and post-processing generated answers to annotate with citations.
[How to: do per-user retrieval](https://python.langchain.com/docs/how_to/qa_per_user/): needing to configure retrieval chains for per-user data access, wanting to limit document access for different users, or building retrieval applications with multi-tenant architectures. Explains how to configure retriever search kwargs to limit retrieved documents based on user, demonstrates code example using Pinecone namespace for multi-tenancy.
### Extraction
Extraction is when you use LLMs to extract structured information from unstructured text.
[this guide](https://python.langchain.com/docs/tutorials/extraction/): building information extraction applications, understanding how to use reference examples for improving extraction performance, or when needing to extract structured data from unstructured text. This tutorial covers building an information extraction chain using LangChain, defining schemas for extracting structured data, using reference examples to improve extraction quality, and extracting multiple entities from text.
[How to: use reference examples](https://python.langchain.com/docs/how_to/extraction_examples/): wanting to use reference examples to improve extraction quality, wanting to structure example inputs and outputs for extraction, wanting to test an extraction model with and without examples. This page explains how to define reference examples in the format expected for the LangChain tool calling API, how to incorporate these examples into prompts, and how using examples can improve extraction performance compared to not using examples.
[How to: handle long text](https://python.langchain.com/docs/how_to/extraction_long_text/): working with large documents or PDFs that exceed the context window of the LLM, when needing to extract structured information from text. This page covers strategies for handling long text when doing information extraction, including a brute force approach of chunking the text and extracting from each chunk, and a retrieval-augmented generation (RAG) approach of indexing the chunks and only extracting from relevant ones. It also discusses common issues with these approaches.
[How to: do extraction without using function calling](https://python.langchain.com/docs/how_to/extraction_parse/): looking to extract structured data from text, when needing to parse model outputs into objects, or when wanting to avoid using tool calling methods for extraction tasks. This page explains how to use prompting instructions to get LLMs to generate outputs in a structured format like JSON, and then use output parsers to convert the model responses into Python objects.
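
A sketch of the parsing approach: embed the parser's format instructions in the prompt and parse the raw completion with `PydanticOutputParser` (the `Person` schema is invented for the example):

```python
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field

class Person(BaseModel):
    name: str = Field(description="The person's name")
    height_in_meters: float = Field(description="Height in meters")

parser = PydanticOutputParser(pydantic_object=Person)

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Answer the user query.\n{format_instructions}"),
        ("human", "{query}"),
    ]
).partial(format_instructions=parser.get_format_instructions())

# chain = prompt | model | parser  # model: any chat model
```
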
### Chatbots
Chatbots involve using an LLM to have a conversation.
[this guide](https://python.langchain.com/docs/tutorials/chatbot/): building a chatbot application, incorporating conversational history, or using prompt templates. This page demonstrates how to build a chatbot with LangChain, including adding message persistence, prompt templates, conversation history management, and response streaming.
[How to: manage memory](https://python.langchain.com/docs/how_to/chatbots_memory/): building a chatbot that incorporates memory (chat history), looking to add context from previous messages to improve responses, or needing techniques to handle long conversations by summarizing or trimming history. The page covers different techniques to add memory capabilities to chatbots, including passing previous messages directly, automatic history management using LangGraph persistence, trimming messages to reduce context, and generating summaries of conversations. Examples in Python are provided for each approach.
[How to: do retrieval](https://python.langchain.com/docs/how_to/chatbots_retrieval/): building a retrieval-augmented chatbot, adding conversational context to retrieval queries, or streaming responses from a chatbot. This page covers setting up a retriever over a document corpus, creating document chains and retrieval chains, transforming queries for better retrieval, and streaming responses from the retrieval chain.
[How to: use tools](https://python.langchain.com/docs/how_to/chatbots_tools/): looking to integrate tools into chatbots, when using agents with tools, when incorporating web search into conversational agents. The page covers how to create a conversational agent using LangChain that can interact with APIs and web search tools, while maintaining chat history. It demonstrates setting up a ReAct agent with a Tavily search tool, invoking the agent, handling conversational responses with chat history, and adding memory.
[How to: manage large chat history](https://python.langchain.com/docs/how_to/trim_messages/): working with long chat histories, when concerned about token limits for chat models, when implementing token management strategies. This page explains how to use the trim_messages utility to reduce the size of a chat message history to fit within token limits, covering trimming by token count or message count, and allowing customization of trimming strategies.
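
A small sketch of `trim_messages`; passing `token_counter=len` counts each message as one "token", so `max_tokens` acts as a message budget (pass a real token counter, e.g. a chat model, in production):

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)

history = [
    SystemMessage("You are a helpful assistant."),
    HumanMessage("Hi!"),
    AIMessage("Hello! How can I help?"),
    HumanMessage("Tell me a joke."),
]

trimmed = trim_messages(
    history,
    strategy="last",      # keep the most recent messages
    token_counter=len,    # one "token" per message (illustrative)
    max_tokens=3,         # total message budget under the counter above
    include_system=True,  # always keep the system message
    start_on="human",     # the trimmed history should start on a human turn
)
```
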
### Query analysis
Query Analysis is the task of using an LLM to generate a query to send to a retriever.
[this guide](https://python.langchain.com/docs/tutorials/rag/#query-analysis): building a question-answering application over unstructured data, learning about Retrieval Augmented Generation (RAG) architectures, or indexing data for use with LLMs. This tutorial covers building a Retrieval Augmented Generation (RAG) application that can answer questions based on ingested data. It walks through loading data, chunking it, embedding and storing it in a vector store, retrieving relevant chunks for a given query, and generating an answer using an LLM. It also shows how to incorporate query analysis for improved retrieval.
[How to: add examples to the prompt](https://python.langchain.com/docs/how_to/query_few_shot/): needing to guide an LLM to generate queries, when fine-tuning an LLM for query generation, when incorporating examples into few-shot prompts. This page covers how to add examples to prompts for query analysis in LangChain, including setting up the system, defining the query schema, generating queries, and tuning prompts by adding examples.
[How to: handle cases where no queries are generated](https://python.langchain.com/docs/how_to/query_no_queries/): querying for information, handling cases where no queries are generated, integrating query analysis with retrieval. Provides guidance on handling scenarios where query analysis techniques allow for no queries to be generated, including code examples for structuring the output, performing query analysis with an LLM, and integrating query analysis with a retriever in a chain.
[How to: handle multiple queries](https://python.langchain.com/docs/how_to/query_multiple_queries/): handling queries that generate multiple potential queries, combining retrieval results from multiple queries, and integrating query analysis with retrieval pipelines. Explains how to handle scenarios where a query analysis step produces multiple potential queries by running retrievals for each query and combining the results. Demonstrates this approach with code examples using LangChain components.
[How to: handle multiple retrievers](https://python.langchain.com/docs/how_to/query_multiple_retrievers/): needing to handle multiple retrievers for query analysis, when implementing a query analyzer that can select between different retrievers, when building a retrieval-augmented system that needs to choose between different data sources. This page explains how to handle scenarios where a query analysis step allows for selecting between multiple retrievers, showing an example implementation using LangChain's tools for structured output parsing, prompting, and chaining components together.
[How to: construct filters](https://python.langchain.com/docs/how_to/query_constructing_filters/): constructing filters for query analysis, translating filters to specific retriever formats, using LangChain's structured query objects. This page covers how to construct filters as Pydantic models and translate them into retriever-specific filters using LangChain's translators for Chroma and Elasticsearch.
[How to: deal with high cardinality categorical variables](https://python.langchain.com/docs/how_to/query_high_cardinality/): dealing with categorical data with high cardinality, handling potential misspellings of categorical values, and filtering based on categorical values. The page discusses techniques for handling high-cardinality categorical data in query analysis, such as adding all possible values to the prompt, using a vector store to find relevant values, and correcting user input to the closest valid categorical value.
### Q&A over SQL + CSV
You can use LLMs to do question answering over tabular data.
[this guide](https://python.langchain.com/docs/tutorials/sql_qa/): building a question-answering system over a SQL database, or implementing agents or chains to interact with a SQL database. This tutorial covers building question-answering systems over SQL databases using LangChain. It demonstrates creating chains and agents that can generate SQL queries from natural language, execute them against a database, and provide natural language responses. It covers techniques like schema exploration, query validation, and handling high-cardinality columns.
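
A minimal text-to-SQL sketch with `create_sql_query_chain` (the SQLite file and model name are placeholders):

```python
from langchain.chains import create_sql_query_chain
from langchain.chat_models import init_chat_model
from langchain_community.utilities import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # placeholder database
model = init_chat_model("gpt-4o-mini", model_provider="openai")  # placeholder model

# Turn a natural-language question into a SQL query for this database.
write_query = create_sql_query_chain(model, db)
print(write_query.invoke({"question": "How many employees are there?"}))
```
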
[How to: use prompting to improve results](https://python.langchain.com/docs/how_to/sql_prompting/): querying SQL databases with a language model, when doing few-shot prompting for SQL queries, and when selecting relevant few-shot examples dynamically. This page covers how to improve SQL query generation prompts by incorporating database schema information, providing few-shot examples, and dynamically selecting the most relevant few-shot examples using semantic similarity.
[How to: do query validation](https://python.langchain.com/docs/how_to/sql_query_checking/): working on SQL query generation, handling invalid SQL queries, or incorporating human approval for SQL queries. This page covers strategies for validating SQL queries, such as appending a query validator step, prompt engineering, human-in-the-loop approval, and error handling.
[How to: deal with large databases](https://python.langchain.com/docs/how_to/sql_large_db/): dealing with large databases in SQL question-answering, identifying relevant table schemas to include in prompts, and handling high-cardinality columns with proper nouns or other unique values. The page discusses methods to identify relevant tables and table schemas to include in prompts when dealing with large databases. It also covers techniques to handle high-cardinality columns containing proper nouns or other unique values, such as creating a vector store of distinct values and querying it to include relevant spellings in prompts.
[How to: deal with CSV files](https://python.langchain.com/docs/how_to/sql_csv/): needing to build question-answering systems over CSV data, wanting to understand the tradeoffs between using SQL or Python libraries like Pandas, and requiring guidance on securely executing code from language models. This page covers two main approaches to question answering over CSV data: using SQL by loading CSVs into a database, or giving an LLM access to Python environments to interact with CSV data using libraries like Pandas. It discusses the security implications of each approach and provides code examples for implementing question-answering chains and agents with both methods.
### Q&A over graph databases
You can use an LLM to do question answering over graph databases.
[this guide](https://python.langchain.com/docs/tutorials/graph/): building a question-answering system over a graph database, implementing text-to-query generation for graph databases, or learning techniques for query validation and error handling. This page covers building a question-answering application over a graph database using LangChain. It provides a basic implementation using the GraphCypherQAChain, followed by an advanced implementation with LangGraph. The latter includes techniques like few-shot prompting, query validation, and error handling for generating accurate Cypher queries from natural language.
[How to: add a semantic layer over the database](https://python.langchain.com/docs/how_to/graph_semantic/): needing to add a semantic layer over a graph database, needing to use tools representing Cypher templates with an LLM, or needing to build a LangGraph Agent to interact with a Neo4j database. This page covers how to create custom tools with Cypher templates for a Neo4j graph database, bind those tools to an LLM, and build a LangGraph Agent that can invoke the tools to retrieve information from the graph database.
[How to: construct knowledge graphs](https://python.langchain.com/docs/how_to/graph_constructing/): constructing knowledge graphs from unstructured text, storing information in a graph database, using LLM Graph Transformer to extract knowledge from text. This page explains how to set up a Neo4j graph database, use LLMGraphTransformer to extract structured knowledge graph data from text, filter extracted nodes/relationships, and store the knowledge graph in Neo4j.
### Summarization
LLMs can summarize and otherwise distill desired information from text.
[this guide](https://python.langchain.com/docs/tutorials/summarization/): needing to summarize long texts or documents, when building question-answering systems, when creating text analysis applications. This page covers summarizing texts using LangChain, including the "stuff" method (concatenating into single prompt), the "map-reduce" method (splitting into chunks for parallel summarization), and orchestrating these methods using LangGraph.
[How to: summarize text in a single LLM call](https://python.langchain.com/docs/how_to/summarize_stuff/): looking to summarize text, seeking a simple single-LLM summarization method, or exploring basic summarization chains in LangChain. This page outlines how to use LangChain's pre-built 'stuff' summarization chain, which stuffs text into a prompt for an LLM to summarize in a single call.
[How to: summarize text through parallelization](https://python.langchain.com/docs/how_to/summarize_map_reduce/): needing to summarize long text documents using parallelization, needing to optimize summarization for large volumes of text, and needing efficient summarization strategies. This page discusses using a map-reduce strategy to summarize text through parallelization, including breaking the text into subdocuments, generating summaries for each in parallel (map step), and then consolidating the summaries into a final summary (reduce step). It provides code examples using LangChain and LangGraph.
[How to: summarize text through iterative refinement](https://python.langchain.com/docs/how_to/summarize_refine/): attempting to summarize long texts through iterative refinement, learning about building applications with LangGraph, or seeking examples of streaming LLM outputs. This guide demonstrates how to summarize text through iterative refinement using LangGraph. It involves splitting the text into documents, summarizing the first document, and then refining the summary based on subsequent documents until finished. The approach leverages LangGraph's streaming capabilities and modularity.
## LangChain Expression Language (LCEL)
[LCEL](https://python.langchain.com/docs/concepts/lcel/): needing an overview of the LangChain Expression Language (LCEL), deciding whether to use LCEL or not, and understanding how to compose chains using LCEL primitives. Provides an overview of LCEL, a declarative approach to building chains from existing Runnables, covering its benefits, composition primitives like RunnableSequence and RunnableParallel, the composition syntax, automatic type coercion, and guidance on when to use LCEL versus alternatives like LangGraph.
[**LCEL cheatsheet**](https://python.langchain.com/docs/how_to/lcel_cheatsheet/): needing a reference for interacting with Runnables in LangChain or building custom runnables and chains. This page provides a comprehensive cheatsheet with examples for key operations with Runnables such as invoking, batching, streaming, composing, configuring, and dynamically building runnables and chains.
[**Migration guide**](https://python.langchain.com/docs/versions/migrating_chains/): migrating older chains from LangChain v0.0, reimplementing legacy chains, or upgrading to use LCEL and LangGraph. This page provides guidance on migrating from deprecated v0.0 chain implementations to using LCEL and LangGraph, including specific guides for various legacy chains like LLMChain, ConversationChain, RetrievalQA, and others.
[How to: chain runnables](https://python.langchain.com/docs/how_to/sequence/): chaining multiple LangChain components together, composing prompt templates with models, or combining runnables in a sequence. This page explains how to chain runnables (LangChain components) together using the pipe operator '|' or the .pipe() method, including chaining prompt templates with models and parsers, and how input/output formats are coerced during chaining.
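
A minimal sketch of the pipe syntax (the model name is a placeholder; any chat model works):

```python
from langchain.chat_models import init_chat_model
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

model = init_chat_model("gpt-4o-mini", model_provider="openai")  # placeholder
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")

# prompt -> model -> string output, composed with the pipe operator
chain = prompt | model | StrOutputParser()
print(chain.invoke({"topic": "bears"}))
```
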
[How to: stream runnables](https://python.langchain.com/docs/how_to/streaming/): wanting to learn how to stream LLM responses, stream intermediate steps, and configure streaming events. This page covers how to use the `stream` and `astream` methods to stream final outputs, how to use `astream_events` to stream both final outputs and intermediate steps, filtering events, propagating callbacks for streaming, and working with input streams.
[How to: invoke runnables in parallel](https://python.langchain.com/docs/how_to/parallel/): parallelizing steps in a chain, formatting data for chaining, or splitting inputs to run multiple runnables in parallel. Explains how to use RunnableParallel to execute runnables concurrently, format data between steps, and provides examples of parallelizing chains.
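
A toy sketch of `RunnableParallel`: both branches receive the same input, run concurrently, and the output is a dict keyed by branch name:

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel

shout = RunnableLambda(lambda s: s.upper())
reverse = RunnableLambda(lambda s: s[::-1])

both = RunnableParallel(shout=shout, reverse=reverse)
print(both.invoke("hello"))  # {'shout': 'HELLO', 'reverse': 'olleh'}
```
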
[How to: add default invocation args to runnables](https://python.langchain.com/docs/how_to/binding/): wanting to invoke a Runnable with constant arguments not part of the preceding output or user input, or needing to bind provider-specific arguments like stop sequences or tools. This page explains how to use the Runnable.bind() method to set default invocation arguments for a Runnable within a RunnableSequence. It covers binding stop sequences to language models and attaching OpenAI tools.
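
A short sketch of `.bind()` (placeholder model; the stop sequence is arbitrary):

```python
from langchain.chat_models import init_chat_model

model = init_chat_model("gpt-4o-mini", model_provider="openai")  # placeholder

# Every invocation of `bounded` now carries stop=["\n"] by default.
bounded = model.bind(stop=["\n"])
# bounded.invoke("Count to five, one number per line.") stops at the first newline.
```
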
[How to: turn any function into a runnable](https://python.langchain.com/docs/how_to/functions/): needing to use custom functions, needing to implement streaming, needing to pass metadata to runnables. Covers how to use custom functions as Runnables, including constructors, decorators, coercion, passing metadata, and implementing streaming.
[How to: pass through inputs from one chain step to the next](https://python.langchain.com/docs/how_to/passthrough/): needing to pass data from one step to the next in a chain, when formatting inputs for prompts, when retrieving and preparing context for prompts. This page explains how to use RunnablePassthrough and RunnableParallel to pass data unchanged through chains, covering examples like formatting retrieval results and user inputs into prompts.
[How to: configure runnable behavior at runtime](https://python.langchain.com/docs/how_to/configure/): configuring chain internals at runtime, swapping models or prompts within a chain, or exploring different configurations of runnables. The page covers how to use .configurable_fields to configure parameters of a runnable at runtime, and .configurable_alternatives to swap out runnables with alternatives, including examples for chat models, prompts, and combinations thereof.
[How to: add message history (memory) to a chain](https://python.langchain.com/docs/how_to/message_history/): building a chatbot or multi-turn application, wanting to persist conversational state, wanting to manage message history. This page explains how to add message history and persist conversational state using LangGraph, covering examples with chat models and prompt templates, and managing the message history.
[How to: route between sub-chains](https://python.langchain.com/docs/how_to/routing/): needing to conditionally route between sub-chains based on previous outputs, or needing to use semantic similarity to choose the most relevant prompt for a given query. The page covers how to route between sub-chains in LangChain, including using custom functions, RunnableBranch, and semantic similarity for prompt routing. It provides code examples for each method.
[How to: create a dynamic (self-constructing) chain](https://python.langchain.com/docs/how_to/dynamic_chain/): developing dynamic chains, implementing conditional routing, returning runnables dynamically. The page explains how to create a dynamic chain that constructs parts of itself at runtime by having Runnable Lambdas return other Runnables.
[How to: inspect runnables](https://python.langchain.com/docs/how_to/inspect/): inspecting internals of an LCEL chain, debugging chain logic, or retrieving chain prompts. Provides methods to visualize chain graphs, print prompts used in chains, and inspect chain steps programmatically.
[How to: add fallbacks to a runnable](https://python.langchain.com/docs/how_to/fallbacks/): needing to add fallback options in case of errors, processing long inputs, or wanting to use a better model. This page explains how to configure fallback chains for LLM APIs in case of rate limiting or errors, for handling long input texts exceeding context windows, and for defaulting to better models when parsing fails.
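
A sketch of `.with_fallbacks()` (both model names are placeholders):

```python
from langchain.chat_models import init_chat_model

primary = init_chat_model("gpt-4o-mini", model_provider="openai")  # placeholder
backup = init_chat_model(
    "claude-3-5-haiku-latest", model_provider="anthropic"
)  # placeholder

# If the primary raises (rate limit, outage, ...), the backup is tried.
model = primary.with_fallbacks([backup])
```
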
[How to: pass runtime secrets to a runnable](https://python.langchain.com/docs/how_to/runnable_runtime_secrets/): needing to pass sensitive data to a runnable, ensuring secrets remain hidden from tracing, or integrating secret values with runnables. Explains how to pass runtime secrets to runnables using RunnableConfig, allowing certain keys to be hidden from tracing while still being accessible during invocation.
Tracing gives you observability inside your chains and agents, and is vital in diagnosing issues.
[How to: trace with LangChain](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain/): tracing LangChain applications with LangSmith, customizing trace metadata and run names, or integrating LangChain with the LangSmith SDK. Provides guides on integrating LangSmith tracing into LangChain applications, configuring trace metadata and run names, distributed tracing, interoperability between LangChain and LangSmith SDK, and tracing LangChain invocations without environment variables.
[How to: add metadata and tags to traces](https://docs.smith.langchain.com/how_to_guides/tracing/trace_with_langchain/#add-metadata-and-tags-to-traces): tracing LangChain applications with LangSmith, when logging metadata and tags to traces, and when customizing trace names and IDs. This page provides step-by-step guides on integrating LangSmith tracing with LangChain in Python and JS/TS, covering quick start instructions, selective tracing, logging to specific projects, adding metadata/tags, customizing run names/IDs, accessing run IDs, distributed tracing in Python, and interoperability with the LangSmith SDK.
[in this section of the LangSmith docs](https://docs.smith.langchain.com/how_to_guides/tracing/): configuring observability for LLM applications, accessing and managing traces, and setting up automation and monitoring. Guides on configuring tracing, using the UI/API for traces, creating dashboards, automating rules/alerts, and gathering human feedback for LLM applications.
## Integrations
### Featured Chat Model Providers
- [ChatAnthropic](https://python.langchain.com/docs/integrations/chat/anthropic/)
- [ChatMistralAI](https://python.langchain.com/docs/integrations/chat/mistralai/)
- [ChatFireworks](https://python.langchain.com/docs/integrations/chat/fireworks/)
- [AzureChatOpenAI](https://python.langchain.com/docs/integrations/chat/azure_chat_openai/)
- [ChatOpenAI](https://python.langchain.com/docs/integrations/chat/openai/)
- [ChatTogether](https://python.langchain.com/docs/integrations/chat/together/)
- [ChatVertexAI](https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/)
- [ChatGoogleGenerativeAI](https://python.langchain.com/docs/integrations/chat/google_generative_ai/)
- [ChatGroq](https://python.langchain.com/docs/integrations/chat/groq/)
- [ChatCohere](https://python.langchain.com/docs/integrations/chat/cohere/)
- [ChatBedrock](https://python.langchain.com/docs/integrations/chat/bedrock/)
- [ChatHuggingFace](https://python.langchain.com/docs/integrations/chat/huggingface/)
- [ChatNVIDIA](https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/)
- [ChatOllama](https://python.langchain.com/docs/integrations/chat/ollama/)
- [ChatLlamaCpp](https://python.langchain.com/docs/integrations/chat/llamacpp/)
- [ChatAI21](https://python.langchain.com/docs/integrations/chat/ai21/)
- [ChatUpstage](https://python.langchain.com/docs/integrations/chat/upstage/)
- [ChatDatabricks](https://python.langchain.com/docs/integrations/chat/databricks/)
- [ChatWatsonx](https://python.langchain.com/docs/integrations/chat/ibm_watsonx/)
- [ChatXAI](https://python.langchain.com/docs/integrations/chat/xai/)
[All](https://python.langchain.com/docs/integrations/chat/): integrating chat models into an application, using chat models for conversational AI tasks, or choosing between different chat model providers. Provides an overview of chat models integrated with LangChain, including OpenAI, Anthropic, Google, and others. Covers key features like tool calling, structured output, JSON mode, local usage, and multimodal support.
## Glossary
[AIMessageChunk](https://python.langchain.com/docs/concepts/messages/#aimessagechunk): needing to understand messages and message structure for chat models, when working with chat history, and when integrating with chat model providers. Detailed overview of the different message types used in LangChain for chat models, how messages are structured, and how to convert between LangChain and OpenAI message formats.
[AIMessage](https://python.langchain.com/docs/concepts/messages/#aimessage): building chat applications, when implementing tool calling, or when working with chat model outputs. Messages are the units of communication in chat models, representing input, output and metadata; topics include message types, roles, content, metadata, conversation structure, and LangChain's unified message format.
[astream_events](https://python.langchain.com/docs/concepts/chat_models/#key-methods): implementing an application that uses a chat model, integrating chat models with other LangChain components, or planning for advanced chat model features like tool calling or structured outputs. This page provides an overview of chat models in LangChain, including their key features, interfaces, integration options, tool calling, structured outputs, multimodality, context windows, and advanced topics like rate limiting and caching.
[BaseTool](https://python.langchain.com/docs/concepts/tools/#tool-interface): needing to understand LangChain tools, wanting to create custom tools, or looking for best practices for designing tools. The page covers the tool abstraction in LangChain, which associates a Python function with a schema for name, description, and arguments. It explains how to create tools using the @tool decorator, configure the schema, handle tool artifacts, use special type annotations (InjectedToolArg, RunnableConfig), and provides an overview of toolkits.
[bind_tools](https://python.langchain.com/docs/concepts/tool_calling/#tool-binding): building applications that require an LLM to directly interact with external systems or APIs, when integrating tools or functions into an LLM workflow, or when fine-tuning an LLM to better handle tool calling. This page provides an overview of tool calling, which allows LLMs to invoke external tools or APIs with specific input schemas. It covers key concepts like tool creation, binding tools to LLMs, initiating tool calls from LLMs, and executing the called tools. It also offers guidance on recommended usage and best practices.
[Caching](https://python.langchain.com/docs/concepts/chat_models/#caching): building chat applications, using LLMs for information extraction, or working with multimodal data. This page discusses chat models, which are language models that operate on messages. It covers chat model interfaces, integrations, features like tool calling and structured outputs, multimodality, context windows, rate limiting, and caching.
[Chat models](https://python.langchain.com/docs/concepts/multimodality/#multimodality-in-chat-models): needing to understand multimodal capabilities in LangChain, when working with multimodal data like images/audio/video, and when determining if a specific LangChain component supports multimodality. Provides an overview of multimodality in chat models, embedding models, and vector stores. Discusses multimodal inputs/outputs for chat models and how they are formatted.
[Configurable runnables](https://python.langchain.com/docs/concepts/runnables/#configurable-runnables): trying to understand how to use Runnables, how to configure and compose Runnables, and how to inspect Runnable schemas. The Runnable interface is the foundation for working with LangChain components like language models, output parsers, and retrievers. It defines methods for invoking, batching, streaming, inspecting schemas, configuring, and composing Runnables.
[Context window](https://python.langchain.com/docs/concepts/chat_models/#context-window): getting an overview of chat models, understanding the key functionality of chat models, and determining if this concept is relevant for their application. Provides an overview of chat models (LLMs with a chat interface), their features, integrations, key methods like invoking/streaming, handling inputs/outputs, using tools/structured outputs, and advanced topics like rate limiting and caching.
[Conversation patterns](https://python.langchain.com/docs/concepts/chat_history/#conversation-patterns): managing conversation history in chatbots, implementing memory for chat models, understanding correct conversation structure. This page explains the concept of chat history, a record of messages exchanged between a user and a chat model. It covers conversation patterns, guidelines for managing chat history to avoid exceeding context window, and the importance of preserving conversation structure.
[Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html/): working with document data, retrieving and processing text documents, integrating with text embedding and vector storage systems. This page provides details on the Document class and its associated methods and properties, as well as examples of how to use it in various scenarios such as document loading, retrieval, and transformation.
[Embedding models](https://python.langchain.com/docs/concepts/multimodality/#multimodality-in-embedding-models): needing to understand multimodal capabilities of LangChain components, wanting to work with non-text data like images/audio/video, or planning to incorporate multimodal data in chat interactions. Provides an overview of multimodality support in chat models (inputs and tools), embedding models, and vector stores; notes current limitations and expected future expansions to handle different data types.
[HumanMessage](https://python.langchain.com/docs/concepts/messages/#humanmessage): understanding how to structure conversations with chat models, needing to work with different types of messages (user, assistant, system, tool), or converting between LangChain and OpenAI message formats. Messages are the units of communication used by chat models, representing user input, assistant output, system instructions, and tool results. Key topics include message structure, types (HumanMessage, AIMessage, SystemMessage, ToolMessage), multimodal content support, and integration with OpenAI message format.
[InjectedState](https://python.langchain.com/docs/concepts/tools/#injectedstate): learning about LangChain's tools, creating custom tools, or integrating tools with chat models. Provides conceptual overview of tools - encapsulating functions with schemas for models to call. Covers creating tools with @tool decorator, tool interfaces, special type annotations, artifacts, best practices, and toolkits.
[InjectedStore](https://python.langchain.com/docs/concepts/tools/#injectedstore): needing to understand how to create and use tools in LangChain, when needing to pass runtime values to tools, and when needing to configure a tool's schema. Tools are a way to encapsulate functions and their schemas to be used with chat models that support tool calling. The page covers the tool interface, creating tools with the @tool decorator, using tools directly, configuring tool schemas, returning artifacts from tools, and special type annotations like InjectedToolArg and RunnableConfig.
[InjectedToolArg](https://python.langchain.com/docs/concepts/tools/#injectedtoolarg): trying to understand how to create and use tools in LangChain, when needing to configure tool schemas, and when wanting to return artifacts from tools. Tools provide a way to encapsulate Python functions and schemas to be passed to chat models for execution. The page covers creating tools with the @tool decorator, configuring tool schemas, special type annotations, and tool artifacts.
[input and output types](https://python.langchain.com/docs/concepts/runnables/#input-and-output-types): needing to interact with LangChain components, wanting to understand the core Runnable interface, or composing complex chains using LCEL. Covers the Runnable interface that defines a standard way to invoke, batch, stream and inspect components; the RunnableConfig for setting runtime options; creating custom Runnables; configurable Runnables; and how input/output types, schemas, and streaming work.
[Integration packages](https://python.langchain.com/docs/concepts/architecture/#integration-packages): determining the overall architecture of LangChain, understanding the different components and packages in the LangChain ecosystem, or deciding which packages to import for a specific use case. This page provides an overview of the different packages that make up the LangChain framework, including langchain-core, langchain, integration packages, langchain-community, langgraph, langserve, and LangSmith, and explains the purpose and contents of each package.
[Integration tests](https://python.langchain.com/docs/concepts/testing/#integration-tests): needing guidance on testing LangChain components, understanding different types of tests (unit, integration, standard), or wanting to contribute by adding tests to an integration. Provides an overview of unit tests, integration tests, and standard tests in the LangChain ecosystem, including definitions, examples, and how to implement them for new tools/integrations.
[invoke](https://python.langchain.com/docs/concepts/runnables/): learning how to use the Runnable interface, when working with custom Runnables, and when needing to configure Runnables at runtime. The page covers the Runnable interface, its methods for invocation, batching, streaming, inspecting schemas, and configuration. It explains RunnableConfig, custom Runnables, and configurable Runnables.
[JSON mode](https://python.langchain.com/docs/concepts/structured_outputs/#json-mode): needing to return structured output that conforms to a specific schema, needing to store model output in a database, or needing to ensure model output matches a predefined format. This page covers how to define an output schema, and techniques like tool calling and JSON mode that allow models to return structured output conforming to that schema, as well as a helper method to streamline the process.
[langchain-community](https://python.langchain.com/docs/concepts/architecture/#langchain-community): learning about the structure of LangChain, deploying LangChain applications, or needing an overview of the LangChain ecosystem. This page gives an overview of the different packages, components, and services that make up the LangChain framework, including langchain-core, langchain, integration packages, langchain-community, LangGraph, LangServe, and LangSmith.
[langchain-core](https://python.langchain.com/docs/concepts/architecture/#langchain-core): needing an overview of LangChain's architecture, when considering integrating external packages, or when exploring the LangChain ecosystem. Outlines the main components of LangChain (langchain-core, langchain, integration packages, langchain-community, langgraph, langserve, LangSmith) and their roles, providing a high-level architectural overview.
[langchain](https://python.langchain.com/docs/concepts/architecture/#langchain): looking to understand the overall architecture of LangChain, when trying to determine what LangChain packages to install, or when wanting an overview of the various LangChain projects. This page outlines the hierarchical structure of the LangChain framework, describing the purpose and contents of key packages like langchain-core, langchain, integration packages, langchain-community, langgraph, langserve, and LangSmith.
[langgraph](https://python.langchain.com/docs/concepts/architecture/#langgraph): developing applications with LangChain, seeking to understand the overall architecture of LangChain, planning to contribute to or integrate with LangChain. The page outlines the layered architecture of LangChain, describing the core abstraction layer, the main LangChain package, integration packages, community integrations, LangGraph for stateful agents, LangServe for deployment, and LangSmith developer tools.
[Managing chat history](https://python.langchain.com/docs/concepts/chat_history/#managing-chat-history): understanding and managing chat history, learning about conversation patterns, following correct chat history structure. Explains chat history concept, provides guidelines for managing chat history, discusses conversation patterns involving users, assistants, and tools.
[OpenAI format](https://python.langchain.com/docs/concepts/messages/#openai-format): building chat applications, working with chat models, or consuming message streams. This page covers the structure and components of messages used in chat models, including roles, content, usage metadata, and different message types like HumanMessage, AIMessage, and ToolMessage.
[Propagation of RunnableConfig](https://python.langchain.com/docs/concepts/runnables/#propagation-of-runnableconfig): learning about the LangChain Runnable interface, working with Runnables in LangChain, or understanding how to configure and execute Runnables. The page covers the Runnable interface in LangChain, including invoking/batching/streaming Runnables, input/output schemas, configuring Runnables, creating custom Runnables, and working with configurable Runnables.
[rate-limiting](https://python.langchain.com/docs/concepts/chat_models/#rate-limiting): working with chat models, integrating tool calling or structured outputs, or understanding chat model capabilities. Overview of chat model interface, inputs/outputs, standard parameters; tool calling and structured output support; multimodality; context window; advanced topics like rate limiting, caching.
[RemoveMessage](https://python.langchain.com/docs/concepts/messages/#removemessage): needing information on the structure of messages used in conversational AI models, wanting to understand how messages are represented in LangChain, or looking for details on specific message types like SystemMessage, HumanMessage, and AIMessage. Messages are the basic units of communication in conversational AI models, containing a role (e.g. user, assistant), content (text or multimodal data), and metadata; LangChain provides a standardized message format and different message types to represent various components of a conversation.
[role](https://python.langchain.com/docs/concepts/messages/#role): understanding how to structure messages for chat models, accessing details about different LangChain message types, or converting between LangChain and OpenAI message formats. Messages are the core unit of communication in chat models, representing input/output content and metadata; LangChain defines SystemMessage, HumanMessage, AIMessage, ToolMessage and others to standardize message format across providers.
[RunnableConfig](https://python.langchain.com/docs/concepts/runnables/#runnableconfig): needing to understand the Runnable interface, invoking and configuring Runnables, and creating custom Runnables. The page covers the Runnable interface's core concepts, methods like invoke, batch, and stream, input/output types, configuring Runnables with RunnableConfig, creating custom Runnables from functions, and using configurable Runnables.
[Standard parameters for chat models](https://python.langchain.com/docs/concepts/chat_models/#standard-parameters): building applications using chat models, working with chat models for tool calling, structured outputs or multimodal inputs/outputs. Covers overview of chat models, integrations, interfaces, tool calling, structured outputs, multimodality, context window, rate-limiting, and caching of chat models.
[Standard tests](https://python.langchain.com/docs/concepts/testing/#standard-tests): needing guidance on testing LangChain components, or wanting to understand the different types of tests used in LangChain. This page discusses unit tests for individual functions, integration tests for validating multiple components working together, and LangChain's standard tests for ensuring consistency across tools and integrations.
|
||||
[stream](https://python.langchain.com/docs/concepts/streaming/): [building applications that use streaming, building applications that need to display partial results in real-time, building applications that need to provide updates on pipeline or workflow progress] 'This page covers streaming in LangChain, including what can be streamed in LLM applications, the streaming APIs available, how to write custom data to the stream, and how LangChain automatically enables streaming for chat models in certain cases.'
|
||||
[Tokens](https://python.langchain.com/docs/concepts/tokens/): needing to understand tokens used by LLMs, when dealing with character/token counts, when working with multimodal inputs Tokens are the fundamental units processed by language models. A token can represent words, word parts, punctuation, and other units. Models tokenize inputs, process tokens sequentially, and generate new tokens as output. Tokens enable efficient and contextual language processing compared to characters.
|
||||
[Tokens](https://python.langchain.com/docs/concepts/tokens/): needing to understand tokens used by LLMs, when dealing with character/token counts, when working with multimodal inputs Tokens are the fundamental units processed by language models. A token can represent words, word parts, punctuation, and other units. Models tokenize inputs, process tokens sequentially, and generate new tokens as output. Tokens enable efficient and contextual language processing compared to characters.
|
||||
[Tool artifacts](https://python.langchain.com/docs/concepts/tools/#tool-artifacts): needing to understand what tools are, how to create and use them, and how they integrate with models. Explains what tools are in LangChain, how to create them using the @tool decorator, special type annotations for configuring runtime behavior, how to use tools directly or pass them to chat models, and best practices for designing tools.
|
||||
[Tool binding](https://python.langchain.com/docs/concepts/tool_calling/#tool-binding): determining if tool calling functionality is appropriate for their application, understanding the key concepts and workflow of tool calling, and considering best practices for designing tools. This page covers an overview of tool calling, key concepts like tool creation/binding/calling/execution, recommended usage workflow, details on implementing each step, and best practices for designing effective tools.
|
||||
[@tool](https://python.langchain.com/docs/concepts/tools/#create-tools-using-the-tool-decorator): needing to understand tools in LangChain, when creating custom tools, or when integrating tools into LangChain applications. Provides an overview of tools, how to create and configure tools using the @tool decorator, different tool types (e.g. with artifacts, injected arguments), and best practices for designing tools.
|
||||
[Toolkits](https://python.langchain.com/docs/concepts/tools/#toolkits): creating custom Python functions to use with LangChain, configuring existing tools, or adding tools to chat models. Explains the tool abstraction for encapsulating Python functions, creating tools with the `@tool` decorator, configuring schemas, handling tool artifacts, special type annotations, and using toolkits that group related tools.
|
||||
[ToolMessage](https://python.langchain.com/docs/concepts/messages/#toolmessage): understanding the communication protocol with chat models, working with chat history management, or understanding LangChain's Message object structure. Messages are the unit of communication in chat models and represent input/output along with metadata; LangChain provides a unified Message format with types like SystemMessage, HumanMessage, AIMessage to handle different roles, content types, tool calls.
|
||||
[Unit tests](https://python.langchain.com/docs/concepts/testing/#unit-tests): developing unit or integration tests, or when contributing to LangChain integrations Provides an overview of unit tests, integration tests, and standard tests used in the LangChain ecosystem
|
||||
[Vector stores](https://python.langchain.com/docs/concepts/vectorstores/): LLM should read this page when: 1) Building applications that need to index and retrieve information based on semantic similarity 2) Integrating vector databases into their application 3) Exploring advanced vector search and retrieval techniques Vector stores are specialized data stores that enable indexing and retrieving information based on vector representations (embeddings) of data, allowing semantic similarity search over unstructured data like text, images, and audio. The page covers vector store integrations, the core interface, adding/deleting documents, basic and advanced similarity search techniques, and concepts like metadata filtering.
|
||||
[with_structured_output](https://python.langchain.com/docs/concepts/structured_outputs/#structured-output-method): [needing to return structured data like JSON or database rows, working with models that support structured output like tools or JSON modes, or integrating with helper functions to streamline structured output] [Overview of structured output concept, schema definition formats like JSON/dicts and Pydantic, model integration methods like tool calling and JSON modes, LangChain structured output helper method]
|
||||
[with_types](https://python.langchain.com/docs/concepts/runnables/#with_types): learning about the Runnable interface in LangChain, understanding how to work with Runnables, and customizing or configuring Runnables. The page covers the Runnable interface, optimized parallel execution, streaming APIs, input/output types, inspecting schemas, RunnableConfig options, creating custom Runnables from functions, and configurable Runnables.
|
||||
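As a minimal illustration of how several of the concepts indexed above fit together (message tuples, structured output via `with_structured_output`), here is a short sketch; it assumes the `langchain-openai` package is installed and an `OPENAI_API_KEY` is set, and the schema and model name are illustrative, not prescribed by the docs above:

```python
# Sketch only: ties together message tuples and structured output.
# Assumes `pip install langchain-openai` and OPENAI_API_KEY in the environment.
from pydantic import BaseModel

from langchain_openai import ChatOpenAI


class Translation(BaseModel):
    """Structured result returned by the model."""

    text: str
    language: str


llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # model name is illustrative

# Messages as (role, content) tuples, per the message-format entries above.
messages = [
    ("system", "Translate the user sentence to German."),
    ("human", "I love programming."),
]

# with_structured_output binds the schema to the model (via tool calling or
# JSON mode under the hood) and parses the reply into the Pydantic model.
structured_llm = llm.with_structured_output(Translation)
result = structured_llm.invoke(messages)
print(result.text, result.language)
```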
@@ -1,262 +1,260 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "afaf8039",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e49f1e0d",
   "metadata": {},
   "source": [
    "# Chat__ModuleName__\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This will help you getting started with __ModuleName__ [chat models](/docs/concepts/chat_models). For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about models, prices, context windows, etc. See https://python.langchain.com/docs/integrations/chat/openai/ for an example.\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "- TODO: Fill in table features.\n",
    "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
    "- TODO: Make sure API reference links are correct.\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/__package_name_short_snake__) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [Chat__ModuleName__](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html) | [__package_name__](https://python.langchain.com/api_reference/__package_name_short_snake__/) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
    "\n",
    "### Model features\n",
    "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
    "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
    "| ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ | ✅/❌ |\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
    "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"Enter your __ModuleName__ API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
   "metadata": {},
-  "source": [
-   "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
-  ]
+  "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU __package_name__"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a38cde65-254d-4219-a441-068766c0d4b5",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:\n",
    "\n",
    "- TODO: Update model instantiation with relevant params."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
   "metadata": {},
   "outputs": [],
   "source": [
    "from __module_name__ import Chat__ModuleName__\n",
    "\n",
    "llm = Chat__ModuleName__(\n",
    "    model=\"model-name\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    timeout=None,\n",
    "    max_retries=2,\n",
    "    # other params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b4f3e15",
   "metadata": {},
   "source": [
    "## Invocation\n",
    "\n",
    "- TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62e0dbc3",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "messages = [\n",
    "    (\n",
    "        \"system\",\n",
    "        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
    "    ),\n",
    "    (\"human\", \"I love programming.\"),\n",
    "]\n",
    "ai_msg = llm.invoke(messages)\n",
    "ai_msg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(ai_msg.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n",
    "\n",
    "- TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "prompt = ChatPromptTemplate(\n",
    "    [\n",
    "        (\n",
    "            \"system\",\n",
    "            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
    "        ),\n",
    "        (\"human\", \"{input}\"),\n",
    "    ]\n",
    ")\n",
    "\n",
    "chain = prompt | llm\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"input_language\": \"English\",\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
   "metadata": {},
   "source": [
    "## TODO: Any functionality specific to this model provider\n",
    "\n",
    "E.g. creating/using finetuned models via this provider. Delete if not relevant."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all Chat__ModuleName__ features and configurations head to the [API reference](https://python.langchain.com/api_reference/__package_name_short_snake__/chat_models/__module_name__.chat_models.Chat__ModuleName__.html)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
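For orientation, the placeholders in the template above (`__ModuleName__`, `__package_name__`, `model-name`) are substituted when a new integration package is scaffolded. A hedged sketch of how the instantiation and invocation cells render for a real provider, using `langchain-openai` as a stand-in (the model name is illustrative, and an `OPENAI_API_KEY` is assumed):

```python
# Sketch: the template's instantiation + invocation cells, rendered with
# langchain-openai standing in for the Chat__ModuleName__ placeholder.
from langchain_openai import ChatOpenAI  # assumes OPENAI_API_KEY is set

llm = ChatOpenAI(
    model="gpt-4o-mini",  # illustrative model name
    temperature=0,
    max_tokens=None,
    timeout=None,
    max_retries=2,
)

messages = [
    (
        "system",
        "You are a helpful assistant that translates English to French. Translate the user sentence.",
    ),
    ("human", "I love programming."),
]
ai_msg = llm.invoke(messages)
print(ai_msg.content)  # e.g. "J'adore la programmation."
```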
@@ -1,219 +1,217 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# __ModuleName__Loader\n",
    "\n",
    "- TODO: Make sure API reference link is correct.\n",
    "\n",
    "This notebook provides a quick overview for getting started with __ModuleName__ [document loader](https://python.langchain.com/docs/concepts/document_loaders). For detailed documentation of all __ModuleName__Loader features and configurations head to the [API reference](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.__module_name___loader.__ModuleName__Loader.html).\n",
    "\n",
    "- TODO: Add any other relevant links, like information about underlying API, etc.\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "- TODO: Fill in table features.\n",
    "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
    "- TODO: Make sure API reference links are correct.\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/web_loaders/__module_name___loader)|\n",
    "| :--- | :--- | :---: | :---: | :---: |\n",
    "| [__ModuleName__Loader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.__module_name__loader.__ModuleName__Loader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |\n",
    "### Loader features\n",
    "| Source | Document Lazy Loading | Native Async Support\n",
    "| :---: | :---: | :---: |\n",
    "| __ModuleName__Loader | ✅/❌ | ✅/❌ |\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "To access __ModuleName__ document loader you'll need to install the `__package_name__` integration package, and create a **ModuleName** account and get an API key.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"Enter your __ModuleName__ API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
-  "source": [
-   "If you want to get automated best in-class tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
-  ]
+  "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "Install **langchain_community**.\n",
    "\n",
    "- TODO: Add any other required packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU langchain_community"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Initialization\n",
    "\n",
    "Now we can instantiate our model object and load documents:\n",
    "\n",
    "- TODO: Update model instantiation with relevant params."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.document_loaders import __ModuleName__Loader\n",
    "\n",
    "loader = __ModuleName__Loader(\n",
    "    # required params = ...\n",
    "    # optional params = ...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load\n",
    "\n",
    "- TODO: Run cells to show loading capabilities"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "docs = loader.load()\n",
    "docs[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(docs[0].metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Lazy Load\n",
    "\n",
    "- TODO: Run cells to show lazy loading capabilities. Delete if lazy loading is not implemented."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "page = []\n",
    "for doc in loader.lazy_load():\n",
    "    page.append(doc)\n",
    "    if len(page) >= 10:\n",
    "        # do some paged operation, e.g.\n",
    "        # index.upsert(page)\n",
    "\n",
    "        page = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## TODO: Any functionality specific to this document loader\n",
    "\n",
    "E.g. using specific configs for different loading behavior. Delete if not relevant."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all __ModuleName__Loader features and configurations head to the API reference: https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.__module_name___loader.__ModuleName__Loader.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
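As above, a hedged sketch of the load and lazy-load cells rendered with a real loader, using `TextLoader` from `langchain_community` as a stand-in for the `__ModuleName__Loader` placeholder (the file path is illustrative):

```python
# Sketch: the template's load / lazy-load cells with a real loader.
from langchain_community.document_loaders import TextLoader

loader = TextLoader("state_of_the_union.txt")  # illustrative path

docs = loader.load()
print(docs[0].metadata)

# Lazy loading keeps memory bounded by processing documents in pages,
# exactly as the template's lazy-load cell suggests.
page = []
for doc in loader.lazy_load():
    page.append(doc)
    if len(page) >= 10:
        # do some paged operation, e.g. index.upsert(page)
        page = []
```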
@@ -1,238 +1,236 @@
{
 "cells": [
  {
   "cell_type": "raw",
   "id": "67db2992",
   "metadata": {},
   "source": [
    "---\n",
    "sidebar_label: __ModuleName__\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9597802c",
   "metadata": {},
   "source": [
    "# __ModuleName__LLM\n",
    "\n",
    "- [ ] TODO: Make sure API reference link is correct\n",
    "\n",
    "This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. For detailed documentation on `__ModuleName__LLM` features and configuration options, please refer to the [API reference](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html).\n",
    "\n",
    "## Overview\n",
    "### Integration details\n",
    "\n",
    "- TODO: Fill in table features.\n",
    "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n",
    "- TODO: Make sure API reference links are correct.\n",
    "\n",
    "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/llms/__package_name_short_snake__) | Package downloads | Package latest |\n",
    "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
    "| [__ModuleName__LLM](https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html) | [__package_name__](https://api.python.langchain.com/en/latest/__package_name_short_snake___api_reference.html) | ✅/❌ | beta/❌ | ✅/❌ |  |  |\n",
    "\n",
    "## Setup\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name__` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
    "- TODO: Update with relevant info.\n",
    "\n",
    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the __MODULE_NAME___API_KEY environment variable:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc51e756",
   "metadata": {},
   "outputs": [],
   "source": [
    "import getpass\n",
    "import os\n",
    "\n",
    "if not os.getenv(\"__MODULE_NAME___API_KEY\"):\n",
    "    os.environ[\"__MODULE_NAME___API_KEY\"] = getpass.getpass(\"Enter your __ModuleName__ API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4b6e1ca6",
   "metadata": {},
-  "source": [
-   "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
-  ]
+  "source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "196c2b41",
   "metadata": {},
   "outputs": [],
   "source": [
    "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
    "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "809c6577",
   "metadata": {},
   "source": [
    "### Installation\n",
    "\n",
    "The LangChain __ModuleName__ integration lives in the `__package_name__` package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "59c710c4",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -qU __package_name__"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0a760037",
   "metadata": {},
   "source": [
    "## Instantiation\n",
    "\n",
    "Now we can instantiate our model object and generate chat completions:\n",
    "\n",
    "- TODO: Update model instantiation with relevant params."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a0562a13",
   "metadata": {},
   "outputs": [],
   "source": [
    "from __module_name__ import __ModuleName__LLM\n",
    "\n",
    "llm = __ModuleName__LLM(\n",
    "    model=\"model-name\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    timeout=None,\n",
    "    max_retries=2,\n",
    "    # other params...\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0ee90032",
   "metadata": {},
   "source": [
    "## Invocation\n",
    "\n",
    "- [ ] TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "035dea0f",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "input_text = \"__ModuleName__ is an AI company that \"\n",
    "\n",
    "completion = llm.invoke(input_text)\n",
    "completion"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "add38532",
   "metadata": {},
   "source": [
    "## Chaining\n",
    "\n",
    "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n",
    "\n",
    "- TODO: Run cells so output can be seen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "078e9db2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import PromptTemplate\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    \"How to say {input} in {output_language}:\\n\"\n",
    ")\n",
    "\n",
    "chain = prompt | llm\n",
    "chain.invoke(\n",
    "    {\n",
    "        \"output_language\": \"German\",\n",
    "        \"input\": \"I love programming.\",\n",
    "    }\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e99eef30",
   "metadata": {},
   "source": [
    "## TODO: Any functionality specific to this model provider\n",
    "\n",
    "E.g. creating/using finetuned models via this provider. Delete if not relevant"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e9bdfcef",
   "metadata": {},
   "source": [
    "## API reference\n",
    "\n",
    "For detailed documentation of all `__ModuleName__LLM` features and configurations head to the API reference: https://api.python.langchain.com/en/latest/llms/__module_name__.llms.__ModuleName__LLM.html"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.11.1 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "vscode": {
   "interpreter": {
    "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
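Finally, a hedged sketch of the completion-model template's chaining cell rendered for a real provider, with `langchain-openai`'s `OpenAI` LLM standing in for `__ModuleName__LLM` (model name illustrative, `OPENAI_API_KEY` assumed). `PromptTemplate.from_template` is used here, which infers the `{input}` and `{output_language}` variables from the template string:

```python
# Sketch: the template's chaining cell with a real completion model.
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI  # assumes OPENAI_API_KEY is set

llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0, max_retries=2)

# from_template infers the input variables from the braces.
prompt = PromptTemplate.from_template("How to say {input} in {output_language}:\n")

chain = prompt | llm
print(chain.invoke({"input": "I love programming.", "output_language": "German"}))
```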
Some files were not shown because too many files have changed in this diff.