Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-03 03:38:06 +00:00
docs: Upgrade examples with RunnableWithMessageHistory to langgraph memory (#26855)
This PR updates the documentation examples that used RunnableWithMessageHistory to show how to achieve the same implementation with langgraph memory.

Some of the underlying PRs (not all of them):

- docs[patch]: update chatbot tutorial and migration guide (#26780)
- docs[patch]: update chatbot memory how-to (#26790)
- docs[patch]: update chatbot tools how-to (#26816)
- docs: update chat history in rag how-to (#26821)
- docs: update trim messages notebook (#26793)
- docs: clean up imports in how to guide for rag qa with chat history (#26825)
- docs[patch]: update conversational rag tutorial (#26814)

---------

Co-authored-by: ccurme <chester.curme@gmail.com>
Co-authored-by: Vadym Barda <vadym@langchain.dev>
Co-authored-by: mercyspirit <ziying.qiu@gmail.com>
Co-authored-by: aqiu7 <aqiu7@gatech.edu>
Co-authored-by: John <43506685+Coniferish@users.noreply.github.com>
Co-authored-by: Erick Friis <erick@langchain.dev>
Co-authored-by: William FH <13333726+hinthornw@users.noreply.github.com>
Co-authored-by: Subhrajyoty Roy <subhrajyotyroy@gmail.com>
Co-authored-by: Rajendra Kadam <raj.725@outlook.com>
Co-authored-by: Christophe Bornet <cbornet@hotmail.com>
Co-authored-by: Devin Gaffney <itsme@devingaffney.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
This commit is contained in:
parent 44eddd39d6
commit de0b48c41a
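For context, the migration target described above looks roughly like the following minimal sketch (assuming `langchain` and `langgraph` are installed; the `gpt-4o-mini` model choice is illustrative, not prescribed by this PR): a `StateGraph` compiled with a `MemorySaver` checkpointer keeps per-`thread_id` message history, taking over the session-tracking role that `RunnableWithMessageHistory` used to play.

```python
# Minimal sketch of the langgraph memory pattern the updated docs adopt,
# replacing RunnableWithMessageHistory. Model choice below is illustrative.
from langchain.chat_models import init_chat_model
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph

model = init_chat_model("gpt-4o-mini", model_provider="openai")

def call_model(state: MessagesState):
    # Invoke the chat model on the accumulated message history.
    return {"messages": model.invoke(state["messages"])}

workflow = StateGraph(state_schema=MessagesState)
workflow.add_node("model", call_model)
workflow.add_edge(START, "model")

# MemorySaver checkpoints graph state in memory, keyed by thread_id.
app = workflow.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": "abc123"}}
app.invoke({"messages": [HumanMessage("Hi, I'm Bob.")]}, config)
app.invoke({"messages": [HumanMessage("What's my name?")]}, config)  # history recalled
```

Each call with the same `thread_id` resumes the checkpointed message history, which is why the updated guides no longer need a separate session-history store.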
docs/cassettes/chatbot_16.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
@@ -1 +1 @@
- [old base64 msgpack.zlib payload omitted]
+ [new base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
docs/cassettes/chatbot_19.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
docs/cassettes/chatbot_21.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
docs/cassettes/chatbot_28.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
docs/cassettes/chatbot_29.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
docs/cassettes/chatbot_34.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
@@ -1 +1 @@
- [old base64 msgpack.zlib payload omitted]
+ [new base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
docs/cassettes/chatbot_43.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
docs/cassettes/chatbot_45.msgpack.zlib | 1 | Normal file
@@ -0,0 +1 @@
+ [base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
@@ -1 +1 @@
- [old base64 msgpack.zlib payload omitted]
+ [new base64 msgpack.zlib payload omitted]
@@ -1 +1 @@
- [old base64 msgpack.zlib payload omitted]
+ [new base64 msgpack.zlib payload omitted]
@@ -1 +0,0 @@
- [base64 msgpack.zlib payload omitted]
@ -1 +0,0 @@
|
||||
eNqNVWtsHFcVXjdKCSguRrSVKLVyuy0YEs94Znf2ZTcg22vHjuPnun4hWO7O3N0d78zc8dwZe3cT0yREUFrVYaAioBbc1PYusVwncYyb5lGIqkoFCqVxRXGCUEVKxEMQqIoCfRDurNfYlv2D/bG7957vfOece7577uH8MDKIjLWSGVkzkQFFky6IfThvoCELEfNITkVmEkuTHe2R7gnLkJd2Jk1TJ9VVVVCXWawjDcqsiNWqYb5KTEKziv7XFVSgmYxhKXOl5A/73SoiBCYQcVeDL+x3i5jG0ky6cPdjC0ADAQiSSNHjlgIgITIxoWayoFYjI8gAUFFAIRmHE5gYmEkEYnQNcBxksEURMVmRzQyQNdCgJRSZJFl3JXAbWEFOEJIhJlLdo5VgXWzqWlEIncBYWo37f3iO0EIJUDNAgyr6/FoHiyBjA7y5QgUEG0amEjQDCWsVJkjCYRpZFOm5OBUVqnDIWNCER4AINYpczsixUYgEM+sCraa7WXJAhWYS6AaOKUgFkiwBmdKlNkn1i3RHxRJSnK2EbjJe1seYlhHDDlajuzz9JaaBoEoXcagQRDfoqehULhTocHFsYDSfRFCiYvqdq2wyiYlpz64XyEmnWsqPNBFLspawn01kZb0SSCiuQBNN0wo0VJCfPZ1CSGegIg+j3LKXfQrquiKL0LFXDRKszRQrZsyMjjaap53qGCo5zbTn22kStc1VHRmqZA3wrBBkuVNphh6frCm0A4wCaT45vWA/v9agQzFFSZjiLbFzy86zazGY2FOtUGyPrKOEhpi0p6Ch+oUza/cNSzNlFdn5+o6N4YrG1XBelufZwOl1xCSjifZUoRHPrXNGppFhREw57ONcTsQ4JSN76e1oVIxHY+puOZzpDTZzhtQXQ96kaki1DexD4TY2G/B1tTQ19aT9Yr/Z2tw2HBIZPuAJCD5PyMcxPMuxPMszGRiIJD0610usAOQGvVkZZnu11FBU7mV7uVRK6JP3dgv4ISEuZ4VBy5/qiDRJZkKNwt66QS3SXd8VSbCdg62xjj2p7ICc3hsVGnCiBtDsrGFZ2t3M+Rt9HY0k2NZU1xPXgyOtYc1Haj37oumWTqybQ63mHjYdqe0M4jXp+b1+hitm6OeEIOd8Zle0oSAtYSbtCSHo+6GBiE7nB/pqjh6ZaZHDk1SH6JWX88XZ9Ex7y6qE75oMU03aFxsNuRJ4AiCCdODhPALg/dWcp1rgwZ7W7pn6YpjuTSV4utuAGolTGTasSD4vJi0thaTp+k3FfnF5IDBO+vSWMiitY4KYYlb2TB/TtTyVmebwmeWbxWAjATU5WwhrXyyofiSbHpFES5KSwyMqF8oKXjmGLDE+X3ShY8EJQxNiVGJP+ATvbNGyortpWivtPMdw/PP06ssivWZOMTo2TIYgkb4DZsZeqlRh2rlju728z+unB19DJ7CoWBKKWLEwVqkySQ2dQkjBUDqXZui8QIqsyrQxhe/iG0PsSR91PrsRYOIUoq9RXii0lXthLcJADr9TxCqNEAqFLmwOWqHyUkjI4zm3HkXQ2mx4j0rObgQUKSZ4lcykV+CMLNlLD9BFFAkxDsYQRyPQX58PenhfMMYF/Tzycd6YeLK+kamHYhIxkYIA7Xy4v622tbl+oY9ZqySmXV9+hPMaJpocj+ciyKCdsadFBVsSnZYGylGurtp+ez4ohsRY0BODiPOFOH+IqaNzaIXtf7qbdEZt4TU+lHP6qSVeKvnujse2uQqfLWbnpbZrXNnFm7saX9Dl6QXls+8sDHz4yFnQs+2+Izvf+N4DX7r36D/2HTz47o2XNfnNU8dCFwI3/vSLJ49+7ODc1MAhz+N3L97e+/5/zsw/HJ3949+yX9l/6wAZHvrELz/453snvhEonx7q+tX1Hff8u6t1+48uHe97u+ZK+PXDP7UWnqp6mLmeqFjcN9/9TMtcddPQg/13Xn6p9K7FpQOne/1S6dzApRe3ud68/9UnXjv2r1zPN0c9l91nZjufvaPFtfNbLY9cfuU7Dfs7rx0S3to6OjbaXi68u/UD++D1israLR8SFuZuH2BKjzR98uvMO5/5eHCP2dEQLtv73tOj4288+gj7/Y+8Wm7s/PL2K8f0zFv3KmX3lNeNC6XnXLutvw9Olez69o3n3T3lf3n9sRtPXb129cXjY3++82s33y85OfPcb2fuH/z0+Fjs+NOe8+UlD16vO7+j/ETZidK5u7N149uP5WtqfBd0cPQm2nXfa9pPEou3pn/9qUcXxn482vDX2waiH71jsf5nB3K//8HWqxYafZwPPzn+xM9/M/a5Epfr1q0trvZqTZ64zeX6L4VnuD8=
|
@ -1 +0,0 @@
|
||||
eNrtWctu20YUbbYBCnTRXTcMUaBAIdKk3pJhFLLlV1I/5cR2gkAYDYfiWCSHnhlaj8CLpvkB7rormjhSYbhOggRtmjZdd9EfcBf9hqJf0EtZqmU4QNcF64Wt4dw59565j2PTDwcHhAvK/Gsn1JeEIyxhIaKHA072QyLko75HpMOso/W12taTkNOzTx0pA1GemkIB1VlAfER1zLypA3MKO0hOwefAJUOYowazur9fe/+B6hEhUJMItazce6BiBr58CQt1l4UK4kRBikPcwA5dBQlBhUS+1JWKL9qEK8h1lWEwMaYimSIdojRgrTBb6bIQLBrUpbKrUF+Z95suFY6uphSVM5fETkRXSOKphynlkm+H3lCWP/EA0bJ0RRJw48WR7LEWmTweCsLVw/vwxGMWceNHzUBqGT2nyZA3WGzrw1MTfgrJCfJgIXlIYA1+A7hWsIuhDL1wOHAIsuDS/3jvgyOHCRmdXr7IZwhjAvDEx8yifjP6rtmjQUqxiO0iSY6BgE+GaYqOW4QEGnLpAemfn4qeoyBwKUbx/tSeYP7JiLAmuwG5un0ck9MgNb6MXq1BEJXlqfUuZNxXTD1b1I3nHQ2yQX0XMqi5COLpB8P9nyY3AoRbAKKNqinqnx8+nbRhInq6gvBa7RIk4tiJniLu5bMvJ5/z0JfUI9Fgbv2qu9HmhbuMbpp64cUlYNH1cfTURq4gP1w6TCTvapgBRvSN0ceMtSiJzv6q17Fdb3gztNrdLi4b3NppkIzjcasyr9+uruq9Qm7z1tLSnU4e78qV5dWDEtbMQrqQzaVLOUMzdUM3dVProkLNSQfGtggLyNjL9Cjqbfut/Trd1reNViu7Q29uZdntrE172b0w31qvLVmy6dXR9uyeX9ua26w19Y29lcb6Yqt3l3Zu1rPzrDmtQHThAbVmlo38Qm59QRRXl2bv2EGxvVL1c6KS/rzeubXBArm/Ihf1Tq2yUWQT4eUzec0YRZg3skUj/jod14ZL/KZ0oifpfOlbTkQAfUa+7MOVyVA8PII6JL/9Ohj18OO1Wxcl/OFRFWoyervAaUpJF5QaCZS0kc4qZr5spMtZU1lc2TqZG7nZikvwDDqtI6fIQfzkvF2mFZgcXBA5E0pbK77Y4sgXNtTl/LgHBtgJ/RaxjufeWf1v4+qH1MZ8oGs10gmYINoozOhkR9s8H2facvXleatpjDeRT3vDVojeDtug3eu0LRxalnPQ9oxSL5uhDRJi+9XoSMBZ7AYC0jwRPTHT+dPRzrgQj4E8lIKhGeaPwI1i6LuYTMA4kCUYBqjsRmcpD3XippvJmLlMHjIxDaMLu6FFamGjyjwoVTGtBJy4DFlvOhoMEOJSj0Kmht9Hw1lERzk4/PqqgYQRBmN8kB3m2fhl0oKTGD8mcQGTLZVKP7/baAyVAZNSLv3mshUkbQLGTHvi9VWDEcRjQ5x0xtYataKzj2FRz9iFTMY2SgXTMi1cMAq5TKaYNhso2zBMyyg8m1vQ5hB2iFYbFmQ0qO6uVlaW577f0SYrS1sLzsVr4DPhU9vu1wiHxETH2GWhBdOTkz5gbVZ2o1dFXMKNYsbMW2mzZORL2uxabShXX/TjvPnN3z96biGJyiAZ1FLLaqxtGJRNq8wV6JZx9/adlcxNI+ix6iKvVjZWlz1rf2NTTamssQcFOjqhX6ihPixhMMBQ8pIA5rg782ZqrCyXhQWqKJ2DE+f6VbchLMIDiA7g/dB1ActhFMeyCqpKfYt01LKRUgFKIrX8YKRg6j+SGnsf658KC07sUCD3HO0wpbqsCTXeEGN48AhaWoeAQSpGVvcPr1//79/MxTXUYmlOKPdUUokrW/A7X1LJ30hs1pdIcns9/kspsdybLKnUy4nN+bbTTSx3iyZW3eK3Q4nlLjBKrsJhztqJzXyb+onljhJMvY14Ykf9Z4lN+yzBKBTJ1bkES3wbicRyZ6GM32PG/xJI7B0kWOcdmtzStylx/39jmbi0//n1V48SRf7fuapCskCdYHuvurY6f//69b8BZPHYWQ==
|
@ -1 +1 @@
|
||||
eNptVXtQFHUcRyjFNJMxcjSbTsrJadi93XsfCM7BiSAeIIc8fAzt7f72brnbB/vAuyP+kKQsNd0crdEpX3CniCBK5SNsgnGyx4xpOnGZ2vuhjaZNqWNGvz2OhMGducfv9/1+P9/X5/vd5mgDECWG58Z1MJwMRIKU4UFSm6MiqFeAJK+JsED28VRrWam7Yo8iMrHnfbIsSFl6PSEwKC8AjmBQkmf1Dbie9BGyHv4XAiAO0+rhqVCspTGDBZJEeIGUkaVb3phB8tAVJ8NDRhW0eE7SsSEdR7BgfkamLkPkA0ATKRIQM5pWwhuWp0BAu/IKMmJEzYisiB5e0+XgLQ5/JVkEBAsPNBGQALyQASvAbKCihoWh1qaoDxAUzPVS0tRWHy/Jaufo+LsIkgQQH3AkTzGcVz3gDTNCpo4CdICQQTsMmgPx6qjtfgAEhAgwDSAyZKUeJAQhwJCEJtfXSTzXkUgSkUMCGCtu17JDYEk4We0phUE4ivRlIVhoToejJhuKHQwikkwwXABWDgkQMJ6IEJcfHykQCNIPQZBEE9XIkHHnSB1eUttcBFnqHgVJiKRPbSNE1mI6PPJeVDiZYYEazS8b6y4hvO/OiOI4au0eBSyFOFJtizfi/VHGQBZDCMlDDHUXFiF53s8ANXaztpakaz1sDuMMVdmKMJGq9gCjjxUpxwJ0qbMEDVvN5cWFhZVBC1kju4pKGuwkglsNVpPZYDdjCI5iKI7iSIiwun0GAauSFCuB1RnDDBGu4vz1tUwVWoX5/aZqZlGFiV9qopmwqU6x+MvchZTsZWuJqrw6zl2RX+72okvqXJ6yhf7wMia4qNa0gPdm62B0SgND5RRhlgJzWYFkKynMq6QF2yqXkzNLDsPi2mDxEl6Q613yQjTodiyx8SPCsxgtCJaI0IKZbJj2dA5zIwA4r+xT9+BG414RSAIcGfBSBJZMVqTmVshD8PmpaGJ2dpcW36dweqsTclLtLRCZTJ3BqnMDQWfADCYdbsnCDFlGXLfQVdGRn3BT8UAKdleIBCfRkIYLhikfJX0K5wdUe/4Dyd6rkR12UgsfTikCggIvASQRldpRjZQPLQ2kyHl4aLIQXvQSHBOOu1V746xfFQ6uokiFonwNq1jMHjYZGQ9QSLonYSKIvOYGBoSwkrrHZjJ3JiTDvGuHucLOYwiGH4Wjz5BwzLRkBF6UEQmQcE3JITWWyRJBbcZyjLjZaIGFz9YxHBlQKOBWPE6ehcyUsnWCCAI8QR0LInBfgADDMrAx8e/ECpTUVjM0PjJWQeb9AC7LqCneVuzESA0RaPhaEvdhTHa7/YMHKw1DGaGK3Wo/NlpLAiOjwQ2sdGSsQgJiNyZ1BIe1EYZSY8/CQ62JBDRN2wy4h/Z4jHaPhyCMmNkMPBbaRnusoCu/AMknSB9A3HH+qVFnTYnDVZT/XjUykkhIqTD0iohyvMQxNB1xAxE2Rm0nA7xCwWUpggjEKnfUqD020k5CbMoDDAY7ZrEjeXANDaP9T7tWbdPG3xWrI1o7Oe/Jca8/vS41Kf6kwM/goLykm7uATb14L30CujLQX5l55K/ynoLkxisDs1PXT1O2Dmyat3reyfTkwUbH11vO7pr0yq1rOScab+WmlEUqs8uOLTjTeXgdf/eA/e8LTcqA78qtN/PvnBu8/e9c5d6f9Qd+LvrovP7szap3z/2eVewZuOx7KrXljavh5QEruNG2AvTVuyyfbCt54rHPxNlNe4Pdy6b/tDJ26Mzj56e8401Kun7HNaPJ9eH5KYdKC2adXPvovou2h3YmX1gzd/0Zp0Nfl6tu6910unvjjlnOKcRb/Wl1408nIyfSCot3dvd/Wv1MSmr//l8mTf5mwh+/pmJ9aV+M69s+8eyelhtdc2bdTp7ZtO3UC2dXCxv6v9roSH777tp9qZeXN37c9QO9dTsqpF//Z+2ODdbunIf7TuWyaS3ZW2JXJ387xzow6H7tSvLxkpK+rh83f3fsSGzfb/S0mhdPbO5rbuiz7h98tcQ8ceaT81c4Lt6ccXnx+P04dan85OxHjn4Zu/1y3vTFuYuufZ91N0UrekrSvJZzEzYnJyX9B/ymRTw=
|
||||
eNptVXtsE3Uc7yQKKosKIjFiqEPFwO56116v7eaCXdfSbtmDbsA2GOV692t7a++xe/QxnERmBAUlF42PGBW3roUyXopzzG2AgqgsSkBMhnEhSmKMgJH4h4MR/bXrZAtc0sfv9/1+P9/X5/u9LekokGRW4At6WV4BEkUr8CBrW9ISaFOBrLyU4oASEphkXW19Q7cqsaPLQooiyiUGAyWyqCACnmJRWuAMUdxAhyjFAP+LEZCDSfoFJjHauamIA7JMBYFcVKJft6mIFqArXoGHorXQYqms5xJ6nuLAiqJifZEkREBWpMpAKupogTecwIBI9iooKgghIBzLs1lNHt7h8FdWJEBx8BCgIjKAFwrgRJiLokpZJAy1dKRDgGJgpmO6B5MhQVa0/TOjP0DRNIDogKcFhuWD2r5gOysW6xkQiFAKyMCQeZCrjZYJAyAiVISNgtSklXaQEsUIS1NZuaFVFvjefIqIkhDB7eJMNjcEFoRXtMO1MAi7x1CXgGXm9ThKWFHsYByRFYrlI7BuSISC8aTEnPzz6QKRosMQBMm3UEtNGu+friPIWk81RdfWz4CkJDqk9VASRxKfTL+XVF5hOaClHXW3u8sLb7kzoTiOWg7NAJYTPK315Brx2QxjoEgJhBYghvYRlqIFIcwCbfSaz0cHfH6urCoWcqx12pxhT8LvWtkWsArGtURCVTwBYA9HSb8xTpvswcaEY3UYwS1GC0GSBG5FcBRDcRRHKjFSWOlt4rgKicHqXBZTVObXBGrLyXaVidtRR3nApdbGgVqBAktzY9ATcxuNbU3l5a1os6nB1yop7rYoa0Jr5PpKL1UVc1X6Gp3RWKkeRqdGWabMbaacbi4U8q22C5yMra6OhmuMpMVbFW6PCm1G8xoBQ52uhLO5MjgtPMxkQrB8hCRGWLHss3+KGxHAB5WQ1o2b8N0SkEU4MKAzBUumqPKWJOQhGPk6nZ+crtqqWxRekKyAnNSGXBJbrDda9PVA1BsxI6HHbSUEWWK26ldWN/Q68m4a7kjBQw0SxcsBSEPnFOXTdEjlw4DJOO5I9qEs2WEns+HDGUVAXBRkgOSj0nobEe/kykA8FZ9MThYiSEGKZ9tzbrWhHOtj7fEYQ6sME4rGOMzWTphYP1DpwOG8iSgJWTcwIISTtW7SSOzPS6Z4l4G5YgiOIRh+BI4+S8MxyyYjCpKCyICGS0pJaKPFHBXPzliZCTebSFj4Uj3L0xGVAfWqv0LgIDPlUr0ogYhAMQNxBO4LEGE5FjYm951fgLKWNEPj/tsVFCEM4KpME7m2YsPTNSSQxc8mcQuGsNlsg3dWmoIyQRWbxTYwU0sG06PBjZzcf7tCHqILk3vjU9oIy2ijT8KDjyCsFquNYBjMb7bBA24GVgBII7ARJspCBw44XIiDokMAqc/xT0tXNNXYqz2OvkZkOpGQWnHyBZHmBZlnA4FUPZBgY7QMHRFUBi5LCaQgltfepB220jaAYX5rIICZbBhpRsrhGppC+592yeymzb0pXkxl28kHTxacXrx9ji73zIKff/9VvKf5n7AHB28sf2Twkv/Rvr0XMgbnhS43WrZgzqtF2DOt4NATsXtRcGrOxJ93P7XirMPw7MjluYHoOFlQ17fqsbrUuW/+ufjqjQO+4bLH97xw3/nY/M3PLlqz4ya+SLzq27yHXSPenF09sDvwcof4TvE593vbSqhoS+/bf0ifLlt4/rk4fwaMosmuS1+s+/ikNFy8fXygsSGonvpx1+7CiQ90usFC4TtP4XCmcCS98ET3trGPLqzfqmtg3n6ox9n7ZLWnWkhSlW7H3ytD71ydp7Tv3PWa/SH33IJujJ39VvPrG35r3hW+Or+x9fIYORS9q+xI8cEmQ92See70ElI8uGfWzo33erdu7N+55RgiLj6OXcG/vVjS+eHjrojtl4dRdsO2B8SR7mTZdxdffGAjcXzJjg7j7Fce65iYGF57om/h+7GqITk4obS8+WvLtvWn3z36y76nlv9Mdj2SGnzYs+iHYx88rfv+S/v1zdHecmbxWfKv+73k+mJXqXrjHss1h/r0/KVX+Pj475u7uE7PuqV/vp756vm3opbOcfOlM95vVs27zoXZrXhF6Vh/4U991xu7N+zb662ueeNormOzdJ9bCub+dZdO9x9fx1Kh
|
@ -1 +1 @@
|
||||
eNptVWtsFFUULpqgkkgKpoqiOKwGEujMzuzOvlqwbN8Ft912Sx8YaO7O3N2d7s7cYe5Mu7uAj9poDJA4EiUSExWWXa21PKzyEh+ARAQlxIisaOMfRRM0Skw0NgbvbLfSWibZxz2P75x7znfODOT6oIYlpMwalhQdakDQyQGbAzkNbjQg1gezMtRjSMwEW0LtewxNyi+L6bqKK+x2oEoMUqECJEZAsr2PswsxoNvJfzUBCzCZMBJT+X822WSIMYhCbKugHttkExAJpejkYGuUFlNNS2WqGoVt5ZRNQwloiQ0MNduWcmq6LUwkkGW5mGpE/ZQAFKqJAhhLWKdSyKB0JIJU1VSYCSUg7v/H6iSZLsWUnKIUIMOqmbHXE4mMRJiwRFFVp52Mi9YNLYwsW4VIOfKLdQ0CmRwiIIEhEehQVkkViaGFxTKeLbkYBCKp8VhJaSaGsG6OTK/bPiAIkOBDRUCipETNt6NpSS2nRBhJAB0OkaQVWOiKORSHUKVBQuqD2Qkvcz9Q1YQkAEtv78VIGS5ektZTKpypHrJuR5NWKLo52kKS8DfZgynSYIXiGN7LsPuTNKmYpCRIx+gEIPlk1YL+2FSFCoQ4AaGL5DGzE84jU20QNvcGgNASmgYJNCFm7gWa7ObfmSrXDEWXZGjmaoIzwxWVN8I5GY5jPAemAeOUIph7C404NM0Z6lqKFhDBMF9nswJCcQma+Ws9PUKkJyyvlGpTnd4mVhO7wtAZkzXRX8esrW1m0h5X25rGxo6kW+jWA03NfT6B5jwOD+9y+FwszTEswzEcnQKeUMyhsp3Y8AC215mWQLpTiW/skTqZTjYe57uk1e08WstHpDTfa7jjwVCjqEflHtBZ3auE2mvaQlGmtTcQDjbE0+uk5Ooevg5FKymSndEniSubWHe9K1iPvc2N1R0R1dsfqFVc2O94tCe5phWp+saA3sAkQ/5WL5qSntvpptlihm6W97LWMzLJjQRUonrM3ONwOd/QIFbJqMKns6RkuoEHMoSH8NynueLM7m5Zc4PCZZlawknzeL0mlVMODxWCKuVgHTzFuStYR4XTQTUE2odrimHab0rBA+0aUHCE0LBukvI5IWYocSgO1dyU7MctspNOWumTKaVhUkUY0sWszOEuum1iWdFNte9MTBaNtChQpHQhrHm8wPr+dLJfFAxRjPX1y6wvzTulMDSEyGjRRdWQFYYkRMvY3MP5PCNFzSTvhshdSedZmuWOkNGXBDJm1mVUpOk0hgJZj3rKzJfLIGnN2Eon53K6SeErKUkREoYIQ0a4FsmEmbiSUjWYQEA8mqTJvoAJSZZIYwrfxdWLzYyLOB+eaaCjOCRLOscX2sp+MNVCgxa+dYkbMLzP53v/5kaTUE5i4nO7jk63wnBqNpxDxodnGhQhdrN4ODlpTUuimX+YHHo4zuv18pzLFYY+zsd6WB56I9AliJwzwns93n019XQNEGKQDhX4Z+Zqu5v9gaaa97roqUSiW9SJV1NOQViRIpFsCGqkMeaQkECGSJalBrMEq83fbY56BZ8Q9kT4sJeN+Fi3j64ma2gS7T/aZaxNW3hHPZW12qlEP5nV/uDW20sKz63kc/26HvQ3n2RL379S9tL5wIuj2+ccKWPK7i99bc7iQZsnn5l9OZ5f2Dr4d794KPDZU2cXRfoi0StLwbazz+zc9nWl/933lq3/jvnhu6c3VP053pBO7XZ/uAtdXP6E3T1uf+SOOd8+vioQ/Wq0Xi57vhZn4rcNUId/PJvXWs+NbF0x/76FQf/F7tVXvrTtHZO5z691b2z7NXjx+9SZsfmnF4w/5xT5O6+mzhw8c++F74//9OTOk/OFb1+fc/s9Y2taTpS+uk3c/sxHpy7tu1QZGZy9ZOWxj3/5Qzvvv7xpnrZrw5t3Pb5g7vKrwyffrvr91rHVD/0zolErdqmn7lq3KF9z4tKmzfuv73j5sWe/aav7644lC77YuueBB5c/cMbZ0Fi9/TfbeEvItap62RMX5lWd3pFdeOfBxcLmF8ZRy2tvdeya+4rh6Lj7Le/DHxZKRwp44Odj3C0lJf8CQsxC+Q==
|
||||
eNptVX1sE2UYH/AHxiAQFI0m6lnBJbDr7vq5DhG7rl3LZJ3rYB9Ex9u7t+3Ru3tv9951bRGQDzFEiV4wfgTECFsLzRggBBWY8ROmSIgfUTeJCajx2zm/IvED3+s62RxN2t77fPye532e3/PcxnwKqlhA8pReQdagCjiNHLCxMa/CTh1ibXNOgloC8d2N4UjzHl0VBhckNE3B1ZWVQBGsSIEyEKwckipTbCWXAFoleVZEWITpjiI+M/jnGosEMQZxiC3V1Mo1Fg6RULJGDpagcBsVKpeoGhS1VFAWFYnQFOsYqpa1FdREWyiKyLS8jQqiLooDMhWiAMYC1qgM0ikN8SCzZDzMqBIQ9/9jtZBMyzElZSgZSHDJ5Nj3EYmEeCiaorii0Q5ES4IsmJYykbHkH2sqBBI5xICIIRFoUFJIDTVdNZEYq3ttPgEBTyr8Wdns7gTCmtE3sWoHAMdBgg5lDvGCHDf2x7OCUkHxMCYCDRZIyjIs9sQoJCFUaCAKKZgb9TIOAkURBQ6Y+srVGMm9pSvSWkaBk9UF8240aYSsGUfCJAlvqLIxQ9orU6zVUWVlDqZpUi9BFkm/aBGQfHJKUX98vEIBXJKA0CXqGLlR577xNggbPcsAF45MgAQqlzB6gCq5HIfHy1Vd1gQJGnlf4+RwJeXlcHYry1rdhyYA44zMGT3FRrw4wRlqaobmEMEwnmdyHEJJARqDP3d0cLGOqLS4vivha/F7/MlQJhqo64xVIVuLI6NroRj0JlOuqC3N2b3x1oxveZJm3Ta3w+VysFU0a2WsrJWllzIuVNfUJkm1Ks80Btz2FJZXxMI1rqzOp71WX00soIfTUK+1Qnd7azzUFbTZOttqalZb2+3NHatVLdiZEuzWBhxZ2gTquwJLO1r9qa5FFMlOTwn84qAT+INSItGx3IskzCxflko22FzupvpkNoU6bc4ViLH6Axl/+9L4uPQYu51mShm6GEcVY376xrghQjmuJYw9Nie7V4VYIYMKN+VIyTQdb+wmPITvDuRLE7s7XH+ZwnO7awknjf6AKlRQNjcVgQplY2wOivVUO1zVTg9Vt6y511cK03xFCh5qVoGMY4SG/jHK57mELichX/Bdkez9JtlJJ830yYzSMK0gDOlSVkZvK900uqroUO3h0cmikRoHspAthjX6i6zvyqa7eE7n+USqS2I8WYddiEKdix0puSgqMsOQhGgJk+K4mb6SZox3BXJXhmYZmmFfJqMvcGTMzMsoSNVoDDmyHLWMMVghgbQ5Y4vtrNPuIoVfRAkyJ+o8jOjRWiQRZuJFlKJCEQH+WJom+wKKgiSQxhR/S4sXG91O4vzSZAMNJSFZ0XlHsa3MK+MtVGjim5e4DOPweDwnrmw0BmUnJh6X89hEKwzHZ8PaJPzSZIMSxG4G96bHrGmBNwbnkUMH5N3uqJ2zR4GTYWOQhy6eAx4mVuWORnkbX3XAF6B9gEtAOlLkn5GvbWvwLgv5jrbS44lEh5XRF1NeRlgWYrFcBKqkMUaBE5HOk2WpwhzBavK2GUeqOA9kmCh08G6nh3E56RqyhsbQ/qNdt7lpi2+oDTmznXL8rSnZWx+5qqz4mUa+ly5pj3vDnzOzHxq+OHfT2WtW1weP9fcOWaZMl4PHF262uJ+Zlzt66oXyu71/Dy/M94BZa0bOr10XHj4Tn0o1009ShQYV7mxo2fHbmeHvR1xfFE6vHzn5wIlX0I+oZdbx41H6jve8n97Q+8dpvc3ovKnNWHzhnT38o/sOR+/nM7vOrJvxauO26qH2U0Jg370ren755s76vL965c5zN++7elPr65GpZT95ThzK7x+5rwJUX2yasYVaed3pX8rLzofq/AOzm1cJbfcEWg14xPfbkg/XHHyf2luWHLr/1Zv7vfPpc76Hh17blV1V+HLdnJGzw+wtO+4aqHrr9/bnfn3j5adyH2WGtly75LttLyzQeupuHDg5c8PbWzfcMufBr8D6S2fnnb/zxlPzP5gpzThxrPrk1pFvb98JywfebHosu+Kvs80fDTR989MPWy58Eh5MzXh6V/3Mhf5PpgcirneOfv1sy6ntrj7x+o9XPpE5NFCrBv+ZahZ4Wtn2S3vqVpHnfwH0sWF4
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -23,6 +23,17 @@
|
||||
"\n",
|
||||
"We'll go into more detail on a few techniques below!\n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"This how-to guide previously built a chatbot using [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html). You can access this version of the guide in the [v0.2 docs](https://python.langchain.com/v0.2/docs/how_to/chatbots_memory/).\n",
|
||||
"\n",
|
||||
"As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n",
|
||||
"\n",
|
||||
"If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n",
|
||||
"\n",
|
||||
"Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"You'll need to install a few packages, and have your OpenAI API key set as an environment variable named `OPENAI_API_KEY`:"
|
||||
@ -34,32 +45,21 @@
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"name": "stdin",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 23.3.2 is available.\n",
|
||||
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
|
||||
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
|
||||
"OpenAI API Key: ········\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain langchain-openai\n",
|
||||
"%pip install --upgrade --quiet langchain langchain-openai langgraph\n",
|
||||
"\n",
|
||||
"# Set env var OPENAI_API_KEY or load from a .env file:\n",
|
||||
"import dotenv\n",
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"dotenv.load_dotenv()"
|
||||
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -71,13 +71,13 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat = ChatOpenAI(model=\"gpt-4o-mini\")"
|
||||
"model = ChatOpenAI(model=\"gpt-4o-mini\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -98,34 +98,33 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I said \"J'adore la programmation,\" which means \"I love programming\" in French.\n"
|
||||
"I said, \"I love programming\" in French: \"J'adore la programmation.\"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant. Answer all questions to the best of your ability.\"\n",
|
||||
" ),\n",
|
||||
" (\"placeholder\", \"{messages}\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"messages\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain = prompt | model\n",
|
||||
"\n",
|
||||
"ai_msg = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": [\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"Translate this sentence from English to French: I love programming.\",\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Translate from English to French: I love programming.\"\n",
|
||||
" ),\n",
|
||||
" (\"ai\", \"J'adore la programmation.\"),\n",
|
||||
" (\"human\", \"What did you just say?\"),\n",
|
||||
" AIMessage(content=\"J'adore la programmation.\"),\n",
|
||||
" HumanMessage(content=\"What did you just say?\"),\n",
|
||||
" ],\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
@ -136,51 +135,57 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages.\n",
|
||||
"\n",
|
||||
"## Chat history\n",
|
||||
"\n",
|
||||
"It's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://python.langchain.com/api_reference/langchain/index.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. LangChain integrates with many providers - you can see a [list of integrations here](/docs/integrations/memory) - but for this demo we will use an ephemeral demo class.\n",
|
||||
"\n",
|
||||
"Here's an example of the API:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content='Translate this sentence from English to French: I love programming.'),\n",
|
||||
" AIMessage(content=\"J'adore la programmation.\")]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history = ChatMessageHistory()\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.add_user_message(\n",
|
||||
" \"Translate this sentence from English to French: I love programming.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.add_ai_message(\"J'adore la programmation.\")\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.messages"
|
||||
"We can see that by passing the previous conversation into a chain, it can use it as context to answer questions. This is the basic concept underpinning chatbot memory - the rest of the guide will demonstrate convenient techniques for passing or reformatting messages."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can use it directly to store conversation turns for our chain:"
|
||||
"## Automatic history management\n",
|
||||
"\n",
|
||||
"The previous examples pass messages to the chain (and model) explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also provides a way to build applications that have memory using LangGraph's [persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/). You can [enable persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/) in LangGraph applications by providing a `checkpointer` when compiling the graph."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, MessagesState, StateGraph\n",
|
||||
"\n",
|
||||
"workflow = StateGraph(state_schema=MessagesState)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state: MessagesState):\n",
|
||||
" system_prompt = (\n",
|
||||
" \"You are a helpful assistant. \"\n",
|
||||
" \"Answer all questions to the best of your ability.\"\n",
|
||||
" )\n",
|
||||
" messages = [SystemMessage(content=system_prompt)] + state[\"messages\"]\n",
|
||||
" response = model.invoke(messages)\n",
|
||||
" return {\"messages\": response}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the node and edge\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"\n",
|
||||
"# Add simple in-memory checkpointer\n",
|
||||
"# highlight-start\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"app = workflow.compile(checkpointer=memory)\n",
|
||||
"# highlight-end"
|
||||
]
|
||||
},
|
||||
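A note on durability: `MemorySaver` keeps checkpoints in process memory, so conversation history is lost when the process restarts. As a rough sketch, assuming the separate `langgraph-checkpoint-sqlite` package is installed, a persistent checkpointer could be swapped in without changing the rest of the graph:

    # Hypothetical swap-in for MemorySaver; requires langgraph-checkpoint-sqlite.
    from langgraph.checkpoint.sqlite import SqliteSaver

    # from_conn_string acts as a context manager in recent releases; checkpoints
    # are written to the SQLite file and survive restarts.
    with SqliteSaver.from_conn_string("checkpoints.db") as memory:
        app = workflow.compile(checkpointer=memory)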
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
" We'll pass the latest input to the conversation here and let the LangGraph keep track of the conversation history using the checkpointer:"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -191,7 +196,8 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
|
||||
"{'messages': [HumanMessage(content='Translate to French: I love programming.', additional_kwargs={}, response_metadata={}, id='be5e7099-3149-4293-af49-6b36c8ccd71b'),\n",
|
||||
" AIMessage(content=\"J'aime programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 4, 'prompt_tokens': 35, 'total_tokens': 39, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_e9627b5346', 'finish_reason': 'stop', 'logprobs': None}, id='run-8a753d7a-b97b-4d01-a661-626be6f41b38-0', usage_metadata={'input_tokens': 35, 'output_tokens': 4, 'total_tokens': 39})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
@ -200,159 +206,35 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"demo_ephemeral_chat_history = ChatMessageHistory()\n",
|
||||
"\n",
|
||||
"input1 = \"Translate this sentence from English to French: I love programming.\"\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.add_user_message(input1)\n",
|
||||
"\n",
|
||||
"response = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": demo_ephemeral_chat_history.messages,\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.add_ai_message(response)\n",
|
||||
"\n",
|
||||
"input2 = \"What did I just ask you?\"\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.add_user_message(input2)\n",
|
||||
"\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": demo_ephemeral_chat_history.messages,\n",
|
||||
" }\n",
|
||||
"app.invoke(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"Translate to French: I love programming.\")]},\n",
|
||||
" config={\"configurable\": {\"thread_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Automatic history management\n",
|
||||
"\n",
|
||||
"The previous examples pass messages to the chain explicitly. This is a completely acceptable approach, but it does require external management of new messages. LangChain also includes an wrapper for LCEL chains that can handle this process automatically called `RunnableWithMessageHistory`.\n",
|
||||
"\n",
|
||||
"To show how it works, let's slightly modify the above prompt to take a final `input` variable that populates a `HumanMessage` template after the chat history. This means that we will expect a `chat_history` parameter that contains all messages BEFORE the current messages instead of all messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability.\",\n",
|
||||
" ),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | chat"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
" We'll pass the latest input to the conversation here and let the `RunnableWithMessageHistory` class wrap our chain and do the work of appending that `input` variable to the chat history.\n",
|
||||
" \n",
|
||||
" Next, let's declare our wrapped chain:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history_for_chain = ChatMessageHistory()\n",
|
||||
"\n",
|
||||
"chain_with_message_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history_for_chain,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This class takes a few parameters in addition to the chain that we want to wrap:\n",
|
||||
"\n",
|
||||
"- A factory function that returns a message history for a given session id. This allows your chain to handle multiple users at once by loading different messages for different conversations.\n",
|
||||
"- An `input_messages_key` that specifies which part of the input should be tracked and stored in the chat history. In this example, we want to track the string passed in as `input`.\n",
|
||||
"- A `history_messages_key` that specifies what the previous messages should be injected into the prompt as. Our prompt has a `MessagesPlaceholder` named `chat_history`, so we specify this property to match.\n",
|
||||
"- (For chains with multiple outputs) an `output_messages_key` which specifies which output to store as history. This is the inverse of `input_messages_key`.\n",
|
||||
"\n",
|
||||
"We can invoke this new chain as normal, with an additional `configurable` field that specifies the particular `session_id` to pass to the factory function. This is unused for the demo, but in real-world chains, you'll want to return a chat history corresponding to the passed session:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run dc4e2f79-4bcd-4a36-9506-55ace9040588 not found for run 34b5773e-3ced-46a6-8daf-4d464c15c940. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
|
||||
"{'messages': [HumanMessage(content='Translate to French: I love programming.', additional_kwargs={}, response_metadata={}, id='be5e7099-3149-4293-af49-6b36c8ccd71b'),\n",
|
||||
" AIMessage(content=\"J'aime programmer.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 4, 'prompt_tokens': 35, 'total_tokens': 39, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_e9627b5346', 'finish_reason': 'stop', 'logprobs': None}, id='run-8a753d7a-b97b-4d01-a661-626be6f41b38-0', usage_metadata={'input_tokens': 35, 'output_tokens': 4, 'total_tokens': 39}),\n",
|
||||
" HumanMessage(content='What did I just ask you?', additional_kwargs={}, response_metadata={}, id='c667529b-7c41-4cc0-9326-0af47328b816'),\n",
|
||||
" AIMessage(content='You asked me to translate \"I love programming\" into French.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 54, 'total_tokens': 67, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-134a7ea0-d3a4-4923-bd58-25e5a43f6a1f-0', usage_metadata={'input_tokens': 54, 'output_tokens': 13, 'total_tokens': 67})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_message_history.invoke(\n",
|
||||
" {\"input\": \"Translate this sentence from English to French: I love programming.\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run cc14b9d8-c59e-40db-a523-d6ab3fc2fa4f not found for run 5b75e25c-131e-46ee-9982-68569db04330. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_message_history.invoke(\n",
|
||||
" {\"input\": \"What did I just ask you?\"}, {\"configurable\": {\"session_id\": \"unused\"}}\n",
|
||||
"app.invoke(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"What did I just ask you?\")]},\n",
|
||||
" config={\"configurable\": {\"thread_id\": \"1\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
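Worth spelling out: the `thread_id` in the config selects which checkpoint the graph reads from and writes to, so distinct ids give fully independent conversations. A minimal illustration with the `app` compiled above:

    # Each thread_id maps to its own persisted message history.
    app.invoke(
        {"messages": [HumanMessage(content="Hi, I'm Alice.")]},
        config={"configurable": {"thread_id": "a"}},
    )

    # A different thread_id starts from an empty history, so the model
    # has no memory of Alice in this conversation.
    app.invoke(
        {"messages": [HumanMessage(content="What's my name?")]},
        config={"configurable": {"thread_id": "b"}},
    )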
@ -366,80 +248,44 @@
|
||||
"\n",
|
||||
"### Trimming messages\n",
|
||||
"\n",
|
||||
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the historic messages before passing them to the model. Let's use an example history with some preloaded messages:"
|
||||
"LLMs and chat models have limited context windows, and even if you're not directly hitting limits, you may want to limit the amount of distraction the model has to deal with. One solution is trim the history messages before passing them to the model. Let's use an example history with the `app` we declared above:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
|
||||
" AIMessage(content='Hello!'),\n",
|
||||
" HumanMessage(content='How are you today?'),\n",
|
||||
" AIMessage(content='Fine thanks!')]"
|
||||
"{'messages': [HumanMessage(content=\"Hey there! I'm Nemo.\", additional_kwargs={}, response_metadata={}, id='6b4cab70-ce18-49b0-bb06-267bde44e037'),\n",
|
||||
" AIMessage(content='Hello!', additional_kwargs={}, response_metadata={}, id='ba3714f4-8876-440b-a651-efdcab2fcb4c'),\n",
|
||||
" HumanMessage(content='How are you today?', additional_kwargs={}, response_metadata={}, id='08d032c0-1577-4862-a3f2-5c1b90687e21'),\n",
|
||||
" AIMessage(content='Fine thanks!', additional_kwargs={}, response_metadata={}, id='21790e16-db05-4537-9a6b-ecad0fcec436'),\n",
|
||||
" HumanMessage(content=\"What's my name?\", additional_kwargs={}, response_metadata={}, id='c933eca3-5fd8-4651-af16-20fe2d49c216'),\n",
|
||||
" AIMessage(content='Your name is Nemo.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 63, 'total_tokens': 68, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-a0b21acc-9dbb-4fb6-a953-392020f37d88-0', usage_metadata={'input_tokens': 63, 'output_tokens': 5, 'total_tokens': 68})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"demo_ephemeral_chat_history = ChatMessageHistory()\n",
|
||||
"demo_ephemeral_chat_history = [\n",
|
||||
" HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
|
||||
" AIMessage(content=\"Hello!\"),\n",
|
||||
" HumanMessage(content=\"How are you today?\"),\n",
|
||||
" AIMessage(content=\"Fine thanks!\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.add_user_message(\"Hey there! I'm Nemo.\")\n",
|
||||
"demo_ephemeral_chat_history.add_ai_message(\"Hello!\")\n",
|
||||
"demo_ephemeral_chat_history.add_user_message(\"How are you today?\")\n",
|
||||
"demo_ephemeral_chat_history.add_ai_message(\"Fine thanks!\")\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's use this message history with the `RunnableWithMessageHistory` chain we declared above:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 7ff2d8ec-65e2-4f67-8961-e498e2c4a591 not found for run 3881e990-6596-4326-84f6-2b76949e0657. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_message_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain_with_message_history.invoke(\n",
|
||||
" {\"input\": \"What's my name?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
"app.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": demo_ephemeral_chat_history\n",
|
||||
" + [HumanMessage(content=\"What's my name?\")]\n",
|
||||
" },\n",
|
||||
" config={\"configurable\": {\"thread_id\": \"2\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -447,35 +293,88 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see the chain remembers the preloaded name.\n",
|
||||
"We can see the app remembers the preloaded name.\n",
|
||||
"\n",
|
||||
"But let's say we have a very small context window, and we want to trim the number of messages passed to the chain to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:"
|
||||
"But let's say we have a very small context window, and we want to trim the number of messages passed to the model to only the 2 most recent ones. We can use the built in [trim_messages](/docs/how_to/trim_messages/) util to trim messages based on their token count before they reach our prompt. In this case we'll count each message as 1 \"token\" and keep only the last two messages:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import trim_messages\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, MessagesState, StateGraph\n",
|
||||
"\n",
|
||||
"# Define trimmer\n",
|
||||
"# highlight-start\n",
|
||||
"# count each message as 1 \"token\" (token_counter=len) and keep only the last two messages\n",
|
||||
"trimmer = trim_messages(strategy=\"last\", max_tokens=2, token_counter=len)\n",
|
||||
"# highlight-end\n",
|
||||
"\n",
|
||||
"chain_with_trimming = (\n",
|
||||
" RunnablePassthrough.assign(chat_history=itemgetter(\"chat_history\") | trimmer)\n",
|
||||
" | prompt\n",
|
||||
" | chat\n",
|
||||
")\n",
|
||||
"workflow = StateGraph(state_schema=MessagesState)\n",
|
||||
"\n",
|
||||
"chain_with_trimmed_history = RunnableWithMessageHistory(\n",
|
||||
" chain_with_trimming,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
"\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state: MessagesState):\n",
|
||||
" # highlight-start\n",
|
||||
" trimmed_messages = trimmer.invoke(state[\"messages\"])\n",
|
||||
" system_prompt = (\n",
|
||||
" \"You are a helpful assistant. \"\n",
|
||||
" \"Answer all questions to the best of your ability.\"\n",
|
||||
" )\n",
|
||||
" messages = [SystemMessage(content=system_prompt)] + trimmed_messages\n",
|
||||
" # highlight-end\n",
|
||||
" response = model.invoke(messages)\n",
|
||||
" return {\"messages\": response}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the node and edge\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"\n",
|
||||
"# Add simple in-memory checkpointer\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"app = workflow.compile(checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
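Counting each message as one "token" keeps the demo deterministic, but in practice you'd usually trim against a real token budget. A sketch, assuming (per the trim_messages guide) that `token_counter` also accepts a model or token-counting callable:

    from langchain_core.messages import trim_messages

    # Trim by actual token counts instead of message counts; the model's
    # tokenizer is used to measure each message.
    token_trimmer = trim_messages(
        strategy="last",
        max_tokens=500,          # hypothetical budget
        token_counter=model,     # the ChatOpenAI instance defined earlier
        include_system=True,     # keep a leading system message if present
        start_on="human",        # avoid starting history on a dangling AI turn
    )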
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's call this new app and check the response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'messages': [HumanMessage(content=\"Hey there! I'm Nemo.\", additional_kwargs={}, response_metadata={}, id='6b4cab70-ce18-49b0-bb06-267bde44e037'),\n",
|
||||
" AIMessage(content='Hello!', additional_kwargs={}, response_metadata={}, id='ba3714f4-8876-440b-a651-efdcab2fcb4c'),\n",
|
||||
" HumanMessage(content='How are you today?', additional_kwargs={}, response_metadata={}, id='08d032c0-1577-4862-a3f2-5c1b90687e21'),\n",
|
||||
" AIMessage(content='Fine thanks!', additional_kwargs={}, response_metadata={}, id='21790e16-db05-4537-9a6b-ecad0fcec436'),\n",
|
||||
" HumanMessage(content='What is my name?', additional_kwargs={}, response_metadata={}, id='a22ab7c5-8617-4821-b3e9-a9e7dca1ff78'),\n",
|
||||
" AIMessage(content=\"I'm sorry, but I don't have access to personal information about you unless you share it with me. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 39, 'total_tokens': 66, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-f7b32d72-9f57-4705-be7e-43bf1c3d293b-0', usage_metadata={'input_tokens': 39, 'output_tokens': 27, 'total_tokens': 66})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"app.invoke(\n",
|
||||
" {\n",
|
||||
" \"messages\": demo_ephemeral_chat_history\n",
|
||||
" + [HumanMessage(content=\"What is my name?\")]\n",
|
||||
" },\n",
|
||||
" config={\"configurable\": {\"thread_id\": \"3\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -483,101 +382,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's call this new chain and check the messages afterwards:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run 775cde65-8d22-4c44-80bb-f0b9811c32ca not found for run 5cf71d0e-4663-41cd-8dbe-e9752689cfac. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_trimmed_history.invoke(\n",
|
||||
" {\"input\": \"Where does P. Sherman live?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
|
||||
" AIMessage(content='Hello!'),\n",
|
||||
" HumanMessage(content='How are you today?'),\n",
|
||||
" AIMessage(content='Fine thanks!'),\n",
|
||||
" HumanMessage(content=\"What's my name?\"),\n",
|
||||
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
|
||||
" HumanMessage(content='Where does P. Sherman live?'),\n",
|
||||
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"demo_ephemeral_chat_history.messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And we can see that our history has removed the two oldest messages while still adding the most recent conversation at the end. The next time the chain is called, `trim_messages` will be called again, and only the two most recent messages will be passed to the model. In this case, this means that the model will forget the name we gave it the next time we invoke it:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Parent run fde7123f-6fd3-421a-a3fc-2fb37dead119 not found for run 061a4563-2394-470d-a3ed-9bf1388ca431. Treating as a root run.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain_with_trimmed_history.invoke(\n",
|
||||
" {\"input\": \"What is my name?\"},\n",
|
||||
" {\"configurable\": {\"session_id\": \"unused\"}},\n",
|
||||
")"
|
||||
"We can see that `trim_messages` was called and only the two most recent messages will be passed to the model. In this case, this means that the model forgot the name we gave it."
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -593,114 +398,84 @@
|
||||
"source": [
|
||||
"### Summary memory\n",
|
||||
"\n",
|
||||
"We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our chain. Let's recreate our chat history and chatbot chain:"
|
||||
"We can use this same pattern in other ways too. For example, we could use an additional LLM call to generate a summary of the conversation before calling our app. Let's recreate our chat history:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
|
||||
" AIMessage(content='Hello!'),\n",
|
||||
" HumanMessage(content='How are you today?'),\n",
|
||||
" AIMessage(content='Fine thanks!')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"demo_ephemeral_chat_history = ChatMessageHistory()\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.add_user_message(\"Hey there! I'm Nemo.\")\n",
|
||||
"demo_ephemeral_chat_history.add_ai_message(\"Hello!\")\n",
|
||||
"demo_ephemeral_chat_history.add_user_message(\"How are you today?\")\n",
|
||||
"demo_ephemeral_chat_history.add_ai_message(\"Fine thanks!\")\n",
|
||||
"\n",
|
||||
"demo_ephemeral_chat_history.messages"
|
||||
"demo_ephemeral_chat_history = [\n",
|
||||
" HumanMessage(content=\"Hey there! I'm Nemo.\"),\n",
|
||||
" AIMessage(content=\"Hello!\"),\n",
|
||||
" HumanMessage(content=\"How are you today?\"),\n",
|
||||
" AIMessage(content=\"Fine thanks!\"),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We'll slightly modify the prompt to make the LLM aware that will receive a condensed summary instead of a chat history:"
|
||||
"And now, let's update the model-calling function to distill previous interactions into a summary:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant. Answer all questions to the best of your ability. The provided chat history includes facts about the user you are speaking with.\",\n",
|
||||
" ),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"user\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"from langchain_core.messages import HumanMessage, RemoveMessage\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, MessagesState, StateGraph\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"workflow = StateGraph(state_schema=MessagesState)\n",
|
||||
"\n",
|
||||
"chain_with_message_history = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" lambda session_id: demo_ephemeral_chat_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And now, let's create a function that will distill previous interactions into a summary. We can add this one to the front of the chain too:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def summarize_messages(chain_input):\n",
|
||||
" stored_messages = demo_ephemeral_chat_history.messages\n",
|
||||
" if len(stored_messages) == 0:\n",
|
||||
" return False\n",
|
||||
" summarization_prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\n",
|
||||
" \"user\",\n",
|
||||
" \"Distill the above chat messages into a single summary message. Include as many specific details as you can.\",\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state: MessagesState):\n",
|
||||
" system_prompt = (\n",
|
||||
" \"You are a helpful assistant. \"\n",
|
||||
" \"Answer all questions to the best of your ability. \"\n",
|
||||
" \"The provided chat history includes a summary of the earlier conversation.\"\n",
|
||||
" )\n",
|
||||
" summarization_chain = summarization_prompt | chat\n",
|
||||
" system_message = SystemMessage(content=system_prompt)\n",
|
||||
" message_history = state[\"messages\"][:-1] # exclude the most recent user input\n",
|
||||
" # Summarize the messages if the chat history reaches a certain size\n",
|
||||
" if len(message_history) >= 4:\n",
|
||||
" last_human_message = state[\"messages\"][-1]\n",
|
||||
" # Invoke the model to generate conversation summary\n",
|
||||
" summary_prompt = (\n",
|
||||
" \"Distill the above chat messages into a single summary message. \"\n",
|
||||
" \"Include as many specific details as you can.\"\n",
|
||||
" )\n",
|
||||
" summary_message = model.invoke(\n",
|
||||
" message_history + [HumanMessage(content=summary_prompt)]\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" summary_message = summarization_chain.invoke({\"chat_history\": stored_messages})\n",
|
||||
" # Delete messages that we no longer want to show up\n",
|
||||
" delete_messages = [RemoveMessage(id=m.id) for m in state[\"messages\"]]\n",
|
||||
" # Re-add user message\n",
|
||||
" human_message = HumanMessage(content=last_human_message.content)\n",
|
||||
" # Call the model with summary & response\n",
|
||||
" response = model.invoke([system_message, summary_message, human_message])\n",
|
||||
" message_updates = [summary_message, human_message, response] + delete_messages\n",
|
||||
" else:\n",
|
||||
" message_updates = model.invoke([system_message] + state[\"messages\"])\n",
|
||||
"\n",
|
||||
" demo_ephemeral_chat_history.clear()\n",
|
||||
"\n",
|
||||
" demo_ephemeral_chat_history.add_message(summary_message)\n",
|
||||
"\n",
|
||||
" return True\n",
|
||||
" return {\"messages\": message_updates}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain_with_summarization = (\n",
|
||||
" RunnablePassthrough.assign(messages_summarized=summarize_messages)\n",
|
||||
" | chain_with_message_history\n",
|
||||
")"
|
||||
"# Define the node and edge\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"\n",
|
||||
"# Add simple in-memory checkpointer\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"app = workflow.compile(checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
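One detail worth calling out: returning `RemoveMessage` objects works because `MessagesState` aggregates updates with the `add_messages` reducer, which treats a `RemoveMessage` with a matching `id` as a deletion rather than an append. A self-contained sketch of that behavior:

    from langchain_core.messages import AIMessage, HumanMessage, RemoveMessage
    from langgraph.graph.message import add_messages

    history = [
        HumanMessage(content="hi", id="1"),
        AIMessage(content="hello", id="2"),
    ]
    # The reducer matches on message ids: "1" is deleted, nothing is appended.
    updated = add_messages(history, [RemoveMessage(id="1")])
    # updated == [AIMessage(content="hello", id="2")]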
{
|
||||
@ -712,54 +487,37 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You introduced yourself as Nemo. How can I assist you today, Nemo?')"
|
||||
"{'messages': [AIMessage(content=\"Nemo greeted me, and I responded positively, indicating that I'm doing well.\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 16, 'prompt_tokens': 60, 'total_tokens': 76, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-ee42f98d-907d-4bad-8f16-af2db789701d-0', usage_metadata={'input_tokens': 60, 'output_tokens': 16, 'total_tokens': 76}),\n",
|
||||
" HumanMessage(content='What did I say my name was?', additional_kwargs={}, response_metadata={}, id='788555ea-5b1f-4c29-a2f2-a92f15d147be'),\n",
|
||||
" AIMessage(content='You mentioned that your name is Nemo.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 67, 'total_tokens': 75, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_1bb46167f9', 'finish_reason': 'stop', 'logprobs': None}, id='run-099a43bd-a284-4969-bb6f-0be486614cd8-0', usage_metadata={'input_tokens': 67, 'output_tokens': 8, 'total_tokens': 75})]}"
|
||||
]
},
"execution_count": 20,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain_with_summarization.invoke(\n",
"    {\"input\": \"What did I say my name was?\"},\n",
"    {\"configurable\": {\"session_id\": \"unused\"}},\n",
"app.invoke(\n",
"    {\n",
"        \"messages\": demo_ephemeral_chat_history\n",
"        + [HumanMessage(\"What did I say my name was?\")]\n",
"    },\n",
"    config={\"configurable\": {\"thread_id\": \"4\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[AIMessage(content='The conversation is between Nemo and an AI. Nemo introduces himself and the AI responds with a greeting. Nemo then asks the AI how it is doing, and the AI responds that it is fine.'),\n",
" HumanMessage(content='What did I say my name was?'),\n",
" AIMessage(content='You introduced yourself as Nemo. How can I assist you today, Nemo?')]"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"demo_ephemeral_chat_history.messages"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that invoking the chain again will generate another summary from the initial summary plus new messages, and so on. You could also design a hybrid approach where a certain number of messages are retained in chat history while others are summarized."
"Note that invoking the app again will keep accumulating the history until it reaches the specified number of messages (four in our case). At that point we will generate another summary from the initial summary plus new messages, and so on."
]
}
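A minimal sketch of the hybrid approach mentioned in the note above — keeping the most recent messages verbatim while folding older ones into a summary. This is our illustration, not part of the original notebook: `call_model_hybrid` and the `keep_last` cutoff are hypothetical names, and the sketch assumes the `model` and `MessagesState` already defined earlier in this guide.

```python
from langchain_core.messages import HumanMessage, RemoveMessage, SystemMessage

keep_last = 2  # assumed cutoff: how many recent messages to keep verbatim


def call_model_hybrid(state: MessagesState):
    system_message = SystemMessage(
        "Answer all questions to the best of your ability. "
        "The provided chat history includes a summary of the earlier conversation."
    )
    old_messages = state["messages"][:-keep_last]
    recent_messages = state["messages"][-keep_last:]
    if len(old_messages) >= 4:
        # Fold everything older than the last `keep_last` messages into one summary.
        summary_message = model.invoke(
            old_messages
            + [HumanMessage("Distill the above chat messages into a single summary message.")]
        )
        # Remove the summarized messages from the persisted state.
        delete_messages = [RemoveMessage(id=m.id) for m in old_messages]
        response = model.invoke([system_message, summary_message] + recent_messages)
        return {"messages": [summary_message, response] + delete_messages}
    response = model.invoke([system_message] + state["messages"])
    return {"messages": response}
```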
],
@ -779,7 +537,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
}
},
"nbformat": 4,
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@ -7,6 +7,18 @@
"source": [
"# How to add chat history\n",
"\n",
":::note\n",
"\n",
"This guide previously used the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) abstraction. You can access this version of the documentation in the [v0.2 docs](https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/).\n",
"\n",
"As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n",
"\n",
"If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n",
"\n",
"Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n",
":::\n",
"\n",
"\n",
"In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n",
"\n",
"In this guide we focus on **adding logic for incorporating historical messages.**\n",
@ -29,7 +41,7 @@
"\n",
"### Dependencies\n",
"\n",
"We'll use OpenAI embeddings and a Chroma vector store in this walkthrough, but everything shown here works with any [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers). \n",
"We'll use OpenAI embeddings and an InMemory vector store in this walkthrough, but everything shown here works with any [Embeddings](/docs/concepts#embedding-models), and [VectorStore](/docs/concepts#vectorstores) or [Retriever](/docs/concepts#retrievers). \n",
"\n",
"We'll use the following packages:"
]
@ -42,7 +54,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install --upgrade --quiet langchain langchain-community langchain-chroma beautifulsoup4"
"%pip install --upgrade --quiet langchain langchain-community beautifulsoup4"
]
},
{
@ -56,7 +68,7 @@
{
"cell_type": "code",
"execution_count": 2,
"id": "143787ca-d8e6-4dc9-8281-4374f4d71720",
"id": "3b156b76-22a1-43af-a509-137acdccc5d0",
"metadata": {},
"outputs": [],
"source": [
@ -64,11 +76,7 @@
"import os\n",
"\n",
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
"    os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# import dotenv\n",
"\n",
"# dotenv.load_dotenv()"
"    os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()"
]
},
{
@ -153,7 +161,7 @@
"id": "15f8ad59-19de-42e3-85a8-3ba95ee0bd43",
"metadata": {},
"source": [
"For the retriever, we will use [WebBaseLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load the content of a web page. Here we instantiate a `Chroma` vectorstore and then use its [.as_retriever](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.as_retriever) method to build a retriever that can be incorporated into [LCEL](/docs/concepts/#langchain-expression-language) chains."
"For the retriever, we will use [WebBaseLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load the content of a web page. Here we instantiate an `InMemoryVectorStore` vectorstore and then use its [.as_retriever](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.as_retriever) method to build a retriever that can be incorporated into [LCEL](/docs/concepts/#langchain-expression-language) chains."
]
},
{
@ -161,16 +169,24 @@
"execution_count": 5,
"id": "820244ae-74b4-4593-b392-822979dd91b8",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
]
}
],
"source": [
"import bs4\n",
"from langchain.chains import create_retrieval_chain\n",
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
@ -186,7 +202,8 @@
"\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
"splits = text_splitter.split_documents(docs)\n",
"vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n",
"vectorstore = InMemoryVectorStore(embedding=OpenAIEmbeddings())\n",
"vectorstore.add_documents(splits)\n",
"retriever = vectorstore.as_retriever()"
]
},
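As a quick, illustrative sanity check (our addition, not part of the original notebook), the retriever can be queried directly; the query string here is arbitrary:

```python
# Fetch the chunks most relevant to a sample query and peek at the first one.
docs = retriever.invoke("What is Task Decomposition?")
print(len(docs))
print(docs[0].page_content[:200])
```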
@ -286,8 +303,8 @@
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)\n",
"\n",
"question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)\n",
"rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)"
]
},
@ -296,20 +313,17 @@
"id": "53a662c2-f38b-45f9-95c4-66de15637614",
"metadata": {},
"source": [
"### Adding chat history\n",
"### Stateful management of chat history\n",
"\n",
"To manage the chat history, we will need:\n",
"We have added application logic for incorporating chat history, but we are still manually plumbing it through our application. In production, the Q&A application usually persists the chat history into a database, and needs to be able to read and update it appropriately.\n",
"\n",
"1. An object for storing the chat history;\n",
"2. An object that wraps our chain and manages updates to the chat history.\n",
"[LangGraph](https://langchain-ai.github.io/langgraph/) implements a built-in [persistence layer](https://langchain-ai.github.io/langgraph/concepts/persistence/), making it ideal for chat applications that support multiple conversational turns.\n",
"\n",
"For these we will use [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html) and [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html). The latter is a wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n",
"Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n",
"\n",
"For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history/) LCEL how-to guide.\n",
"LangGraph comes with a simple [in-memory checkpointer](https://langchain-ai.github.io/langgraph/reference/checkpoints/#memorysaver), which we use below. See its documentation for more detail, including how to use different persistence backends (e.g., SQLite or Postgres).\n",
"\n",
"Below, we implement a simple example of the second option, in which chat histories are stored in a simple dict. LangChain manages memory integrations with [Redis](/docs/integrations/memory/redis_chat_message_history/) and other technologies to provide for more robust persistence.\n",
"\n",
"Instances of `RunnableWithMessageHistory` manage the chat history for you. They accept a config with a key (`\"session_id\"` by default) that specifies what conversation history to fetch and prepend to the input, and append the output to the same conversation history. Below is an example:"
"For a detailed walkthrough of how to manage message history, head to the [How to add message history (memory)](/docs/how_to/message_history/) guide."
]
},
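The in-memory checkpointer used below is ephemeral. As a hedged sketch of one persistent alternative mentioned above (SQLite), assuming the separate `langgraph-checkpoint-sqlite` package is installed — the exact constructor may differ between langgraph versions:

```python
import sqlite3

from langgraph.checkpoint.sqlite import SqliteSaver

# Store checkpoints in a local SQLite file instead of process memory;
# pass this object as `checkpointer=` when compiling the graph below.
conn = sqlite3.connect("checkpoints.sqlite", check_same_thread=False)
memory = SqliteSaver(conn)
```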
{
@ -319,26 +333,48 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"from typing import Sequence\n",
"\n",
"store = {}\n",
"from langchain_core.messages import AIMessage, BaseMessage, HumanMessage\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import START, StateGraph\n",
"from langgraph.graph.message import add_messages\n",
"from typing_extensions import Annotated, TypedDict\n",
"\n",
"\n",
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
"    if session_id not in store:\n",
"        store[session_id] = ChatMessageHistory()\n",
"    return store[session_id]\n",
"# We define a dict representing the state of the application.\n",
"# This state has the same input and output keys as `rag_chain`.\n",
"class State(TypedDict):\n",
"    input: str\n",
"    chat_history: Annotated[Sequence[BaseMessage], add_messages]\n",
"    context: str\n",
"    answer: str\n",
"\n",
"\n",
"conversational_rag_chain = RunnableWithMessageHistory(\n",
"    rag_chain,\n",
"    get_session_history,\n",
"    input_messages_key=\"input\",\n",
"    history_messages_key=\"chat_history\",\n",
"    output_messages_key=\"answer\",\n",
")"
"# We then define a simple node that runs the `rag_chain`.\n",
"# The `return` values of the node update the graph state, so here we just\n",
"# update the chat history with the input message and response.\n",
"def call_model(state: State):\n",
"    response = rag_chain.invoke(state)\n",
"    return {\n",
"        \"chat_history\": [\n",
"            HumanMessage(state[\"input\"]),\n",
"            AIMessage(response[\"answer\"]),\n",
"        ],\n",
"        \"context\": response[\"context\"],\n",
"        \"answer\": response[\"answer\"],\n",
"    }\n",
"\n",
"\n",
"# Our graph consists only of one node:\n",
"workflow = StateGraph(state_schema=State)\n",
"workflow.add_edge(START, \"model\")\n",
"workflow.add_node(\"model\", call_model)\n",
"\n",
"# Finally, we compile the graph with a checkpointer object.\n",
"# This persists the state, in this case in memory.\n",
"memory = MemorySaver()\n",
"app = workflow.compile(checkpointer=memory)"
]
},
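One design point worth noting (our aside, not in the original guide): checkpointed state is keyed by the `thread_id` in the config used in the next cells, so invoking the compiled app with a different `thread_id` starts from a fresh, empty chat history:

```python
# A new thread_id has no prior checkpoints, so chat_history starts empty.
result = app.invoke(
    {"input": "What is Task Decomposition?"},
    config={"configurable": {"thread_id": "another-thread"}},
)
```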
{
@ -348,23 +384,21 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This process helps agents or models tackle difficult tasks by dividing them into more manageable subtasks. Task decomposition can be achieved through methods like Chain of Thought (CoT) or Tree of Thoughts, which guide the agent in thinking step by step or exploring multiple reasoning possibilities at each step.\n"
]
}
],
"source": [
"conversational_rag_chain.invoke(\n",
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
"\n",
"result = app.invoke(\n",
"    {\"input\": \"What is Task Decomposition?\"},\n",
"    config={\n",
"        \"configurable\": {\"session_id\": \"abc123\"}\n",
"    }, # constructs a key \"abc123\" in `store`.\n",
")[\"answer\"]"
"    config=config,\n",
")\n",
"print(result[\"answer\"])"
]
},
{
@ -374,21 +408,19 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"One way of task decomposition is by using Large Language Models (LLMs) with simple prompting, such as providing instructions like \"Steps for XYZ\" or asking about subgoals for achieving a specific task. This method leverages the power of LLMs to break down tasks into smaller components for easier handling. Additionally, task decomposition can also be done using task-specific instructions tailored to the nature of the task, like requesting a story outline for writing a novel.\n"
]
}
],
"source": [
"conversational_rag_chain.invoke(\n",
"    {\"input\": \"What are common ways of doing it?\"},\n",
"    config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
")[\"answer\"]"
"result = app.invoke(\n",
"    {\"input\": \"What is one way of doing it?\"},\n",
"    config=config,\n",
")\n",
"print(result[\"answer\"])"
]
},
{
@ -396,7 +428,7 @@
"id": "3ab59258-84bc-4904-880e-2ebfebbca563",
"metadata": {},
"source": [
"The conversation history can be inspected in the `store` dict:"
"The conversation history can be inspected via the state of the application:"
]
},
{
@ -409,27 +441,25 @@
"name": "stdout",
"output_type": "stream",
"text": [
"User: What is Task Decomposition?\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"AI: Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable and easier to accomplish. This process can be done using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Task decomposition can be facilitated by providing simple prompts to a language model, task-specific instructions, or human inputs.\n",
"What is Task Decomposition?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"User: What are common ways of doing it?\n",
"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This process helps agents or models tackle difficult tasks by dividing them into more manageable subtasks. Task decomposition can be achieved through methods like Chain of Thought (CoT) or Tree of Thoughts, which guide the agent in thinking step by step or exploring multiple reasoning possibilities at each step.\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"AI: Task decomposition can be achieved through various methods, including using techniques like Chain of Thought (CoT) or Tree of Thoughts to guide the model in breaking down tasks effectively. Common ways of task decomposition include providing simple prompts to a language model, task-specific instructions, or human inputs to break down complex tasks into smaller and more manageable steps. Additionally, task decomposition can involve utilizing resources like internet access for information gathering, long-term memory management, and GPT-3.5 powered agents for delegation of simple tasks.\n",
"\n"
"What is one way of doing it?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"One way of task decomposition is by using Large Language Models (LLMs) with simple prompting, such as providing instructions like \"Steps for XYZ\" or asking about subgoals for achieving a specific task. This method leverages the power of LLMs to break down tasks into smaller components for easier handling. Additionally, task decomposition can also be done using task-specific instructions tailored to the nature of the task, like requesting a story outline for writing a novel.\n"
]
}
],
"source": [
"from langchain_core.messages import AIMessage\n",
"\n",
"for message in store[\"abc123\"].messages:\n",
"    if isinstance(message, AIMessage):\n",
"        prefix = \"AI\"\n",
"    else:\n",
"        prefix = \"User\"\n",
"\n",
"    print(f\"{prefix}: {message.content}\\n\")"
"chat_history = app.get_state(config).values[\"chat_history\"]\n",
"for message in chat_history:\n",
"    message.pretty_print()"
]
},
{
@ -457,17 +487,22 @@
"metadata": {},
"outputs": [],
"source": [
"from typing import Sequence\n",
"\n",
"import bs4\n",
"from langchain.chains import create_history_aware_retriever, create_retrieval_chain\n",
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.messages import AIMessage, BaseMessage, HumanMessage\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import START, StateGraph\n",
"from langgraph.graph.message import add_messages\n",
"from typing_extensions import Annotated, TypedDict\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
@ -485,7 +520,9 @@
"\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
"splits = text_splitter.split_documents(docs)\n",
"vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n",
"\n",
"vectorstore = InMemoryVectorStore(embedding=OpenAIEmbeddings())\n",
"vectorstore.add_documents(documents=splits)\n",
"retriever = vectorstore.as_retriever()\n",
"\n",
"\n",
@ -532,22 +569,41 @@
"\n",
"\n",
"### Statefully manage chat history ###\n",
"store = {}\n",
"\n",
"\n",
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
"    if session_id not in store:\n",
"        store[session_id] = ChatMessageHistory()\n",
"    return store[session_id]\n",
"# We define a dict representing the state of the application.\n",
"# This state has the same input and output keys as `rag_chain`.\n",
"class State(TypedDict):\n",
"    input: str\n",
"    chat_history: Annotated[Sequence[BaseMessage], add_messages]\n",
"    context: str\n",
"    answer: str\n",
"\n",
"\n",
"conversational_rag_chain = RunnableWithMessageHistory(\n",
"    rag_chain,\n",
"    get_session_history,\n",
"    input_messages_key=\"input\",\n",
"    history_messages_key=\"chat_history\",\n",
"    output_messages_key=\"answer\",\n",
")"
"# We then define a simple node that runs the `rag_chain`.\n",
"# The `return` values of the node update the graph state, so here we just\n",
"# update the chat history with the input message and response.\n",
"def call_model(state: State):\n",
"    response = rag_chain.invoke(state)\n",
"    return {\n",
"        \"chat_history\": [\n",
"            HumanMessage(state[\"input\"]),\n",
"            AIMessage(response[\"answer\"]),\n",
"        ],\n",
"        \"context\": response[\"context\"],\n",
"        \"answer\": response[\"answer\"],\n",
"    }\n",
"\n",
"\n",
"# Our graph consists only of one node:\n",
"workflow = StateGraph(state_schema=State)\n",
"workflow.add_edge(START, \"model\")\n",
"workflow.add_node(\"model\", call_model)\n",
"\n",
"# Finally, we compile the graph with a checkpointer object.\n",
"# This persists the state, in this case in memory.\n",
"memory = MemorySaver()\n",
"app = workflow.compile(checkpointer=memory)"
]
},
{
@ -557,23 +613,21 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Task decomposition involves breaking down a complex task into smaller and simpler steps to make it more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help in decomposing hard tasks into multiple manageable tasks by instructing models to think step by step and explore multiple reasoning possibilities at each step. Task decomposition can be achieved through various methods such as using prompting techniques, task-specific instructions, or human inputs.'"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This process helps agents or models handle difficult tasks by dividing them into more manageable subtasks. Different methods like Chain of Thought and Tree of Thoughts are used to decompose tasks into multiple steps, enhancing performance and aiding in the interpretation of the thinking process.\n"
]
}
],
"source": [
"conversational_rag_chain.invoke(\n",
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
"\n",
"result = app.invoke(\n",
"    {\"input\": \"What is Task Decomposition?\"},\n",
"    config={\n",
"        \"configurable\": {\"session_id\": \"abc123\"}\n",
"    }, # constructs a key \"abc123\" in `store`.\n",
")[\"answer\"]"
"    config=config,\n",
")\n",
"print(result[\"answer\"])"
]
},
{
@ -583,21 +637,19 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Task decomposition can be done in common ways such as using prompting techniques like Chain of Thought (CoT) or Tree of Thoughts, which instruct models to think step by step and explore multiple reasoning possibilities at each step. Another way is to provide task-specific instructions, such as asking to \"Write a story outline\" for writing a novel, to guide the decomposition process. Additionally, task decomposition can also involve human inputs to break down complex tasks into smaller and simpler steps.'"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"One way of task decomposition is by using Large Language Models (LLMs) with simple prompting, such as providing instructions like \"Steps for XYZ\" or asking about subgoals for achieving a specific task. This method leverages the power of LLMs to break down tasks into smaller components for easier handling and processing.\n"
]
}
],
"source": [
"conversational_rag_chain.invoke(\n",
"    {\"input\": \"What are common ways of doing it?\"},\n",
"    config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
")[\"answer\"]"
"result = app.invoke(\n",
"    {\"input\": \"What is one way of doing it?\"},\n",
"    config=config,\n",
")\n",
"print(result[\"answer\"])"
]
},
{
@ -670,22 +722,11 @@
"id": "52ae46d9-43f7-481b-96d5-df750be3ad65",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 5cd28d13-88dd-4eac-a465-3770ac27eff6, but expected {'tool'} run.\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_TbhPPPN05GKi36HLeaN4QM90', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2e60d910-879a-4a2a-b1e9-6a6c5c7d7ebc-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_TbhPPPN05GKi36HLeaN4QM90'}])]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_TbhPPPN05GKi36HLeaN4QM90')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in transforming big tasks into multiple manageable tasks, making it easier for autonomous agents to handle and interpret the thinking process. One common method for task decomposition is the Chain of Thought (CoT) technique, where models are instructed to \"think step by step\" to decompose hard tasks. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of multiple thoughts per step. Task decomposition can be facilitated through various methods such as using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 130, 'prompt_tokens': 636, 'total_tokens': 766}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-3ef17638-65df-4030-a7fe-795e6da91c69-0')]}}\n",
"{'agent': {'messages': [AIMessage(content='Task decomposition is a problem-solving strategy that involves breaking down a complex task or problem into smaller, more manageable subtasks. By decomposing a task into smaller components, it becomes easier to understand, analyze, and solve the overall problem. This approach allows individuals to focus on one specific aspect of the task at a time, leading to a more systematic and organized problem-solving process. Task decomposition is commonly used in various fields such as project management, software development, and engineering to simplify complex tasks and improve efficiency.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 102, 'prompt_tokens': 68, 'total_tokens': 170, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-a0925ffd-f500-4677-a108-c7015987e9ae-0', usage_metadata={'input_tokens': 68, 'output_tokens': 102, 'total_tokens': 170})]}}\n",
"----\n"
]
}
@ -746,7 +787,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-1cd17562-18aa-4839-b41b-403b17a0fc20-0')]}}\n",
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d9011a17-9dbb-4348-9a58-ff89419a4bca-0', usage_metadata={'input_tokens': 67, 'output_tokens': 11, 'total_tokens': 78})]}}\n",
"----\n"
]
}
@ -775,22 +816,15 @@
"id": "e2c570ae-dd91-402c-8693-ae746de63b16",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID c54381c0-c5d9-495a-91a0-aca4ae755663, but expected {'tool'} run.\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_rg7zKTE5e0ICxVSslJ1u9LMg', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-122bf097-7ff1-49aa-b430-e362b51354ad-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_rg7zKTE5e0ICxVSslJ1u9LMg'}])]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_qVHvDTfYmWqcbgVhTwsH03aJ', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-bf9df2a6-ad56-43af-8d57-16f850accfd1-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_qVHvDTfYmWqcbgVhTwsH03aJ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 91, 'output_tokens': 19, 'total_tokens': 110})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_rg7zKTE5e0ICxVSslJ1u9LMg')]}}\n",
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:', name='blog_post_retriever', id='742ab53d-6f34-4607-bde7-13f2d75e0055', tool_call_id='call_qVHvDTfYmWqcbgVhTwsH03aJ')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving intricate problems by dividing them into more manageable components. By decomposing tasks, agents or models can better understand the steps involved and plan their actions accordingly. Techniques like Chain of Thought (CoT) and Tree of Thoughts are examples of methods that enhance model performance on complex tasks by breaking them down into smaller steps.', response_metadata={'token_usage': {'completion_tokens': 87, 'prompt_tokens': 659, 'total_tokens': 746}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-b9166386-83e5-4b82-9a4b-590e5fa76671-0')]}}\n",
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used in autonomous agent systems to break down complex tasks into smaller and simpler steps. This approach helps the agent to manage and execute tasks more effectively by dividing them into manageable subtasks. One common method for task decomposition is the Chain of Thought (CoT) technique, which prompts the model to think step by step and decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step by creating a tree structure of thought steps.\\n\\nTask decomposition can be achieved through various methods, such as using language models with simple prompting, task-specific instructions, or human inputs. By breaking down tasks into smaller components, autonomous agents can plan and execute tasks more efficiently.\\n\\nIf you would like more detailed information or examples related to task decomposition, feel free to ask!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 168, 'prompt_tokens': 611, 'total_tokens': 779, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0f51a1cf-ff0a-474a-93f5-acf54e0d8cd6-0', usage_metadata={'input_tokens': 611, 'output_tokens': 168, 'total_tokens': 779})]}}\n",
"----\n"
]
}
@ -825,24 +859,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI', 'function': {'arguments': '{\"query\":\"Common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 769, 'total_tokens': 790}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2d2c8327-35cd-484a-b8fd-52436657c2d8-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Common ways of task decomposition'}, 'id': 'call_6kbxTU5CDWLmF9mrvR7bWSkI'}])]}}\n",
"----\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 29553415-e0f4-41a9-8921-ba489e377f68, but expected {'tool'} run.\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_6kbxTU5CDWLmF9mrvR7bWSkI')]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_n7vUrFacrvl5wUGmz5EGpmCS', 'function': {'arguments': '{\"query\":\"Common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 802, 'total_tokens': 823, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-4d949be3-00e5-49e5-af26-6a217efc8858-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Common ways of task decomposition'}, 'id': 'call_n7vUrFacrvl5wUGmz5EGpmCS', 'type': 'tool_call'}], usage_metadata={'input_tokens': 802, 'output_tokens': 21, 'total_tokens': 823})]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Common ways of task decomposition include:\\n1. Using LLM with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Using task-specific instructions, for example, \"Write a story outline\" for writing a novel.\\n3. Involving human inputs in the task decomposition process.', response_metadata={'token_usage': {'completion_tokens': 67, 'prompt_tokens': 1339, 'total_tokens': 1406}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ad14cde-ca75-4238-a868-f865e0fc50dd-0')]}}\n",
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.', name='blog_post_retriever', id='90fcbc1e-0736-47bc-9a96-347ad837e0e3', tool_call_id='call_n7vUrFacrvl5wUGmz5EGpmCS')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='According to the blog post, common ways of task decomposition include:\\n\\n1. Using Language Models (LLM) with Simple Prompting: Language models can be utilized with simple prompts like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\" to break down tasks into smaller steps.\\n\\n2. Task-Specific Instructions: Providing task-specific instructions to guide the decomposition process. For example, using instructions like \"Write a story outline\" for writing a novel can help in breaking down the task effectively.\\n\\n3. Human Inputs: Involving human inputs in the task decomposition process. Human insights and expertise can contribute to breaking down complex tasks into manageable subtasks.\\n\\nThese methods of task decomposition help autonomous agents in planning and executing tasks more efficiently by breaking them down into smaller and simpler components.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 160, 'prompt_tokens': 1347, 'total_tokens': 1507, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-087ce1b5-f897-40d0-8ef4-eb1c6852a835-0', usage_metadata={'input_tokens': 1347, 'output_tokens': 160, 'total_tokens': 1507})]}}\n",
"----\n"
]
}
@ -877,18 +898,27 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 1,
"id": "b1d2b4d4-e604-497d-873d-d345b808578e",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
]
}
],
"source": [
"import bs4\n",
"from langchain.tools.retriever import create_retriever_tool\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"memory = MemorySaver()\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
@ -907,7 +937,8 @@
"\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
"splits = text_splitter.split_documents(docs)\n",
"vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n",
"vectorstore = InMemoryVectorStore(embedding=OpenAIEmbeddings())\n",
"vectorstore.add_documents(documents=splits)\n",
"retriever = vectorstore.as_retriever()\n",
"\n",
"\n",
@ -959,7 +990,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.11.4"
}
},
"nbformat": 4,
@ -2,7 +2,7 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b5ee5b75-6876-4d62-9ade-5a7a808ae5a2",
|
||||
"id": "eaad9a82-0592-4315-9931-0621054bdd0e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to trim messages\n",
|
||||
@ -22,37 +22,83 @@
|
||||
"\n",
|
||||
"All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message is history, you'll need to manage the length of the messages you're passing in to the model.\n",
|
||||
"\n",
|
||||
"The `trim_messages` util provides some basic strategies for trimming a list of messages to be of a certain token length.\n",
|
||||
"[trim_messages](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.trim_messages.html) can be used to reduce the size of a chat history to a specified token count or specified message count.\n",
|
||||
"\n",
|
||||
"## Getting the last `max_tokens` tokens\n",
|
||||
"\n",
|
||||
"To get the last `max_tokens` in the list of Messages we can set `strategy=\"last\"`. Notice that for our `token_counter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:"
|
||||
"If passing the trimmed chat history back into a chat model directly, the trimmed chat history should satisfy the following properties:\n",
|
||||
"\n",
|
||||
"1. The resulting chat history should be **valid**. Usually this means that the following properties should be satisfied:\n",
|
||||
" - The chat history **starts** with either (1) a `HumanMessage` or (2) a [SystemMessage](/docs/concepts/#systemmessage) followed by a `HumanMessage`.\n",
|
||||
" - The chat history **ends** with either a `HumanMessage` or a `ToolMessage`.\n",
|
||||
" - A `ToolMessage` can only appear after an `AIMessage` that involved a tool call. \n",
|
||||
" This can be achieved by setting `start_on=\"human\"` and `ends_on=(\"human\", \"tool\")`.\n",
|
||||
"3. It includes recent messages and drops old messages in the chat history.\n",
|
||||
" This can be achieved by setting `strategy=\"last\"`.\n",
|
||||
"4. Usually, the new chat history should include the `SystemMessage` if it\n",
|
||||
" was present in the original chat history since the `SystemMessage` includes\n",
|
||||
" special instructions to the chat model. The `SystemMessage` is almost always\n",
|
||||
" the first message in the history if present. This can be achieved by setting\n",
|
||||
" `include_system=True`."
|
||||
]
|
||||
},
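The rules above are easy to misapply when assembling histories by hand. As a quick aid, here is a minimal checker for them; the message classes are real `langchain_core` imports, but the helper and its name are illustrative only, not part of the library:

```python
from typing import List

from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
)


def is_valid_history(messages: List[BaseMessage]) -> bool:
    """Illustrative check of the validity rules listed above."""
    if not messages:
        return False
    # An optional SystemMessage may lead; after that, start on a HumanMessage.
    rest = messages[1:] if isinstance(messages[0], SystemMessage) else messages
    if not rest or not isinstance(rest[0], HumanMessage):
        return False
    # The history must end on a HumanMessage or a ToolMessage.
    if not isinstance(messages[-1], (HumanMessage, ToolMessage)):
        return False
    # A ToolMessage may only follow an AIMessage that made tool calls
    # (or another ToolMessage answering a parallel call).
    for prev, curr in zip(messages, messages[1:]):
        if isinstance(curr, ToolMessage) and not (
            (isinstance(prev, AIMessage) and prev.tool_calls)
            or isinstance(prev, ToolMessage)
        ):
            return False
    return True
```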
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e4bffc37-78c0-46c3-ad0c-b44de0ed3e90",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Trimming based on token count\n",
|
||||
"\n",
|
||||
"Here, we'll trim the chat history based on token count. The trimmed chat history will produce a **valid** chat history that includes the `SystemMessage`.\n",
|
||||
"\n",
|
||||
"To keep the most recent messages, we set `strategy=\"last\"`. We'll also set `include_system=True` to include the `SystemMessage`, and `start_on=\"human\"` to make sure the resulting chat history is valid. \n",
|
||||
"\n",
|
||||
"This is a good default configuration when using `trim_messages` based on token count. Remember to adjust `token_counter` and `max_tokens` for your use case.\n",
|
||||
"\n",
|
||||
"Notice that for our `token_counter` we can pass in a function (more on that below) or a language model (since language models have a message token counting method). It makes sense to pass in a model when you're trimming your messages to fit into the context window of that specific model:"
|
||||
]
|
||||
},
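To make the "language models have a message token counting method" point concrete, a small sketch (assuming an `OPENAI_API_KEY` is configured): passing the model as `token_counter` is effectively the same as using its counting method directly.

```python
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")
msgs = [HumanMessage("what do you call a speechless parrot")]

# trim_messages(..., token_counter=model) counts tokens the same way as
# calling the model's message token counting method directly:
print(model.get_num_tokens_from_messages(msgs))
```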
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "c974633b-3bd0-4844-8a8f-85e3e25f13fe",
|
||||
"id": "c91edeb2-9978-4665-9fdb-fc96cdb51caa",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pip install -qU langchain-openai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "40ea972c-d424-4bc4-9f2e-82f01c3d7598",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# pip install -U langchain-openai\n",
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" ToolMessage,\n",
|
||||
" trim_messages,\n",
|
||||
")\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
@ -70,36 +116,69 @@
|
||||
" HumanMessage(\"what do you call a speechless parrot\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=45,\n",
|
||||
" # Keep the last <= n_count tokens of the messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # highlight-start\n",
|
||||
" # Remember to adjust based on your model\n",
|
||||
" # or else pass a custom token_encoder\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
" # highlight-end\n",
|
||||
" # Most chat models expect that chat history starts with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a SystemMessage followed by a HumanMessage\n",
|
||||
" # highlight-start\n",
|
||||
" # Remember to adjust based on the desired conversation\n",
|
||||
" # length\n",
|
||||
" max_tokens=45,\n",
|
||||
" # highlight-end\n",
|
||||
" # Most chat models expect that chat history starts with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a SystemMessage followed by a HumanMessage\n",
|
||||
" start_on=\"human\",\n",
|
||||
" # Most chat models expect that chat history ends with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a ToolMessage\n",
|
||||
" end_on=(\"human\", \"tool\"),\n",
|
||||
" # Usually, we want to keep the SystemMessage\n",
|
||||
" # if it's present in the original history.\n",
|
||||
" # The SystemMessage has special instructions for the model.\n",
|
||||
" include_system=True,\n",
|
||||
" allow_partial=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d3f46654-c4b2-4136-b995-91c3febe5bf9",
|
||||
"id": "28fcfc94-0d4a-415c-9506-8ae7634253a2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we want to always keep the initial system message we can specify `include_system=True`:"
|
||||
"## Trimming based on message count\n",
|
||||
"\n",
|
||||
"Alternatively, we can trim the chat history based on **message count**, by setting `token_counter=len`. In this case, each message will count as a single token, and `max_tokens` will control\n",
|
||||
"the maximum number of messages.\n",
|
||||
"\n",
|
||||
"This is a good default configuration when using `trim_messages` based on message count. Remember to adjust `max_tokens` for your use case."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "589b0223-3a73-44ec-8315-2dba3ee6117d",
|
||||
"execution_count": 3,
|
||||
"id": "c8fdedae-0e6b-4901-a222-81fc95e265c2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content='and who is harrison chasing anyways', additional_kwargs={}, response_metadata={}),\n",
|
||||
" AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -107,36 +186,59 @@
|
||||
"source": [
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=45,\n",
|
||||
" # Keep the last <= n_count tokens of the messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
" # highlight-next-line\n",
|
||||
" token_counter=len,\n",
|
||||
" # When token_counter=len, each message\n",
|
||||
" # will be counted as a single token.\n",
|
||||
" # highlight-start\n",
|
||||
" # Remember to adjust for your use case\n",
|
||||
" max_tokens=5,\n",
|
||||
" # highlight-end\n",
|
||||
" # Most chat models expect that chat history starts with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a SystemMessage followed by a HumanMessage\n",
|
||||
" start_on=\"human\",\n",
|
||||
" # Most chat models expect that chat history ends with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a ToolMessage\n",
|
||||
" end_on=(\"human\", \"tool\"),\n",
|
||||
" # Usually, we want to keep the SystemMessage\n",
|
||||
" # if it's present in the original history.\n",
|
||||
" # The SystemMessage has special instructions for the model.\n",
|
||||
" include_system=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "8a8b542c-04d1-4515-8d82-b999ea4fac4f",
|
||||
"id": "9367857f-7f9a-4d17-9f9c-6ffc5aae909c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Advanced Usage\n",
|
||||
"\n",
|
||||
"You can use `trim_message` as a building-block to create more complex processing logic.\n",
|
||||
"\n",
|
||||
"If we want to allow splitting up the contents of a message we can specify `allow_partial=True`:"
|
||||
]
|
||||
},
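The source of the next cell lies outside this hunk; judging from its output below, it is essentially the earlier call with partial splitting enabled. A sketch (the token budget of 56 is illustrative):

```python
# Same trim as before, but a message's content may be split when the
# whole message doesn't fit within the budget.
trim_messages(
    messages,
    max_tokens=56,
    strategy="last",
    token_counter=ChatOpenAI(model="gpt-4o"),
    include_system=True,
    # highlight-next-line
    allow_partial=True,
)
```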
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8c46a209-dddd-4d01-81f6-f6ae55d3225c",
|
||||
"execution_count": 4,
|
||||
"id": "8bcca1fe-674c-4713-bacc-8e8e6d6f56c3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" AIMessage(content=\"\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" AIMessage(content=\"\\nWhy, he's probably chasing after the last cup of coffee in the office!\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -154,26 +256,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "306adf9c-41cd-495c-b4dc-e4f43dd7f8f8",
|
||||
"id": "245bee9b-e515-4e89-8f2a-84bda9a25de8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we need to make sure that our first message (excluding the system message) is always of a specific type, we can specify `start_on`:"
|
||||
"By default, the `SystemMessage` will not be included, so you can drop it by either setting `include_system=False` or by dropping the `include_system` argument."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "878a730b-fe44-4e9d-ab65-7b8f7b069de8",
|
||||
"execution_count": 5,
|
||||
"id": "94351736-28a1-44a3-aac7-82356c81d171",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
"[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -181,11 +283,9 @@
|
||||
"source": [
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=60,\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
" include_system=True,\n",
|
||||
" start_on=\"human\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@ -194,25 +294,23 @@
|
||||
"id": "7f5d391d-235b-4091-b2de-c22866b478f3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Getting the first `max_tokens` tokens\n",
|
||||
"\n",
|
||||
"We can perform the flipped operation of getting the *first* `max_tokens` by specifying `strategy=\"first\"`:"
|
||||
]
|
||||
},
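The code cell for this section also sits outside the hunk; based on its output below, a sketch of the flipped call:

```python
# Keep the *first* messages that fit, rather than the last.
trim_messages(
    messages,
    max_tokens=45,
    # highlight-next-line
    strategy="first",
    token_counter=ChatOpenAI(model="gpt-4o"),
)
```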
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"id": "5f56ae54-1a39-4019-9351-3b494c003d5b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content=\"i wonder why it's called langchain\")]"
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content=\"i wonder why it's called langchain\", additional_kwargs={}, response_metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -238,18 +336,36 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 7,
|
||||
"id": "d930c089-e8e6-4980-9d39-11d41e794772",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"pip install -qU tiktoken"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "1c1c3b1e-2ece-49e7-a3b6-e69877c1633b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -257,7 +373,6 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"# pip install tiktoken\n",
|
||||
"import tiktoken\n",
|
||||
"from langchain_core.messages import BaseMessage, ToolMessage\n",
|
||||
"\n",
|
||||
@ -298,9 +413,28 @@
|
||||
"\n",
|
||||
"trim_messages(\n",
|
||||
" messages,\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # highlight-next-line\n",
|
||||
" token_counter=tiktoken_counter,\n",
|
||||
" # Keep the last <= n_count tokens of the messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # When token_counter=len, each message\n",
|
||||
" # will be counted as a single token.\n",
|
||||
" # highlight-start\n",
|
||||
" # Remember to adjust for your use case\n",
|
||||
" max_tokens=45,\n",
|
||||
" # highlight-end\n",
|
||||
" # Most chat models expect that chat history starts with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a SystemMessage followed by a HumanMessage\n",
|
||||
" start_on=\"human\",\n",
|
||||
" # Most chat models expect that chat history ends with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a ToolMessage\n",
|
||||
" end_on=(\"human\", \"tool\"),\n",
|
||||
" # Usually, we want to keep the SystemMessage\n",
|
||||
" # if it's present in the original history.\n",
|
||||
" # The SystemMessage has special instructions for the model.\n",
|
||||
" include_system=True,\n",
|
||||
")"
|
||||
]
|
||||
},
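The body of `tiktoken_counter` is elided between the two hunks above. A plausible minimal version, assuming the per-message overhead used by OpenAI-style chat formats (the encoding name and constants here are assumptions, not the verbatim elided cell):

```python
from typing import List

import tiktoken
from langchain_core.messages import BaseMessage


def tiktoken_counter(messages: List[BaseMessage]) -> int:
    # Illustrative sketch: encode each message's content and add a small
    # amount of per-message framing overhead.
    enc = tiktoken.get_encoding("o200k_base")
    num_tokens = 3  # assumed reply-priming overhead
    for msg in messages:
        num_tokens += 3 + len(enc.encode(str(msg.content)))
    return num_tokens
```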
|
||||
@ -311,22 +445,22 @@
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"`trim_messages` can be used in an imperatively (like above) or declaratively, making it easy to compose with other components in a chain"
|
||||
"`trim_messages` can be used imperatively (like above) or declaratively, making it easy to compose with other components in a chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 9,
|
||||
"id": "96aa29b2-01e0-437c-a1ab-02fb0141cb57",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='A: A \"Polly-gone\"!', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 32, 'total_tokens': 41}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_66b29dffce', 'finish_reason': 'stop', 'logprobs': None}, id='run-83e96ddf-bcaa-4f63-824c-98b0f8a0d474-0', usage_metadata={'input_tokens': 32, 'output_tokens': 9, 'total_tokens': 41})"
|
||||
"AIMessage(content='A polygon! Because it\\'s a \"poly-gone\" quiet!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 32, 'total_tokens': 45, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_057232b607', 'finish_reason': 'stop', 'logprobs': None}, id='run-4fa026e7-9137-4fef-b596-54243615e3b3-0', usage_metadata={'input_tokens': 32, 'output_tokens': 13, 'total_tokens': 45})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -337,9 +471,24 @@
|
||||
"# Notice we don't pass in messages. This creates\n",
|
||||
"# a RunnableLambda that takes messages as input\n",
|
||||
"trimmer = trim_messages(\n",
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=llm,\n",
|
||||
" # Keep the last <= n_count tokens of the messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # When token_counter=len, each message\n",
|
||||
" # will be counted as a single token.\n",
|
||||
" # Remember to adjust for your use case\n",
|
||||
" max_tokens=45,\n",
|
||||
" # Most chat models expect that chat history starts with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a SystemMessage followed by a HumanMessage\n",
|
||||
" start_on=\"human\",\n",
|
||||
" # Most chat models expect that chat history ends with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a ToolMessage\n",
|
||||
" end_on=(\"human\", \"tool\"),\n",
|
||||
" # Usually, we want to keep the SystemMessage\n",
|
||||
" # if it's present in the original history.\n",
|
||||
" # The SystemMessage has special instructions for the model.\n",
|
||||
" include_system=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
@ -359,18 +508,18 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 10,
|
||||
"id": "1ff02d0a-353d-4fac-a77c-7c2c5262abd9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot')]"
|
||||
"[SystemMessage(content=\"you're a good assistant, you always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
|
||||
" HumanMessage(content='what do you call a speechless parrot', additional_kwargs={}, response_metadata={})]"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -391,17 +540,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 11,
|
||||
"id": "a9517858-fc2f-4dc3-898d-bf98a0e905a0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='A \"polly-no-wanna-cracker\"!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 32, 'total_tokens': 42}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_5bf7397cd3', 'finish_reason': 'stop', 'logprobs': None}, id='run-054dd309-3497-4e7b-b22a-c1859f11d32e-0', usage_metadata={'input_tokens': 32, 'output_tokens': 10, 'total_tokens': 42})"
|
||||
"AIMessage(content='A \"polygon\"!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 4, 'prompt_tokens': 32, 'total_tokens': 36, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_c17d3befe7', 'finish_reason': 'stop', 'logprobs': None}, id='run-71d9fce6-bb0c-4bb3-acc8-d5eaee6ae7bc-0', usage_metadata={'input_tokens': 32, 'output_tokens': 4, 'total_tokens': 36})"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -425,7 +574,15 @@
|
||||
" max_tokens=45,\n",
|
||||
" strategy=\"last\",\n",
|
||||
" token_counter=llm,\n",
|
||||
" # Usually, we want to keep the SystemMessage\n",
|
||||
" # if it's present in the original history.\n",
|
||||
" # The SystemMessage has special instructions for the model.\n",
|
||||
" include_system=True,\n",
|
||||
" # Most chat models expect that chat history starts with either:\n",
|
||||
" # (1) a HumanMessage or\n",
|
||||
" # (2) a SystemMessage followed by a HumanMessage\n",
|
||||
" # start_on=\"human\" makes sure we produce a valid chat history\n",
|
||||
" start_on=\"human\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = trimmer | llm\n",
|
||||
@ -471,7 +628,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
File diff suppressed because it is too large
@ -37,8 +37,8 @@
|
||||
"\n",
|
||||
"We will cover two approaches:\n",
|
||||
"\n",
|
||||
"1. Chains, in which we always execute a retrieval step;\n",
|
||||
"2. Agents, in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n",
|
||||
"1. [Chains](/docs/tutorials/qa_chat_history/#chains), in which we always execute a retrieval step;\n",
|
||||
"2. [Agents](/docs/tutorials/qa_chat_history/#agents), in which we give an LLM discretion over whether and how to execute a retrieval step (or multiple steps).\n",
|
||||
"\n",
|
||||
"For the external knowledge source, we will use the same [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag)."
|
||||
]
|
||||
@ -87,16 +87,13 @@
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"\n",
|
||||
"# import dotenv\n",
|
||||
"\n",
|
||||
"# dotenv.load_dotenv()"
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "1665e740-ce01-4f09-b9ed-516db0bd326f",
|
||||
"id": "e207ac1d-4a8e-4172-a9ee-3294519a9a40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### LangSmith\n",
|
||||
@ -107,8 +104,8 @@
|
||||
"\n",
|
||||
"```python\n",
|
||||
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
|
||||
"if not os.environ.get(\"LANGSMITH_API_KEY\"):\n",
|
||||
" os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass()\n",
|
||||
"if not os.environ.get(\"LANGCHAIN_API_KEY\"):\n",
|
||||
" os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
@ -134,7 +131,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 1,
|
||||
"id": "cb58f273-2111-4a9b-8932-9b64c95030c8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -144,12 +141,12 @@
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 3,
|
||||
"id": "820244ae-74b4-4593-b392-822979dd91b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -206,17 +203,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 4,
|
||||
"id": "bf55faaf-0d17-4b74-925d-c478b555f7b2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable for an agent or model. This process helps in guiding the agent through the various subgoals required to achieve the overall task efficiently. Different techniques like Chain of Thought and Tree of Thoughts can be used to decompose tasks into step-by-step processes, enhancing performance and understanding of the model's thinking process.\""
|
||||
"\"Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and provides insight into the model's reasoning.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -278,7 +275,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 5,
|
||||
"id": "2b685428-8b82-4af1-be4f-7232c5d55b73",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -322,7 +319,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 6,
|
||||
"id": "66f275f3-ddef-4678-b90d-ee64576878f9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -354,7 +351,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 7,
|
||||
"id": "0005810b-1b95-4666-a795-08d80e478b83",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@ -362,7 +359,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Task decomposition can be achieved through various methods such as using techniques like Chain of Thought (CoT) or Tree of Thoughts to break down complex tasks into smaller steps. Common ways include prompting the model with simple instructions like \"Steps for XYZ\" or task-specific instructions like \"Write a story outline.\" Human inputs can also be used to guide the task decomposition process effectively.\n"
|
||||
"Common ways of task decomposition include using simple prompting techniques, such as asking for \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\" Additionally, task-specific instructions can be employed, like \"Write a story outline\" for writing tasks, or human inputs can guide the decomposition process.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@ -393,7 +390,7 @@
|
||||
"source": [
|
||||
":::tip\n",
|
||||
"\n",
|
||||
"Check out the [LangSmith trace](https://smith.langchain.com/public/243301e4-4cc5-4e52-a6e7-8cfe9208398d/r) \n",
|
||||
"Check out the [LangSmith trace](https://smith.langchain.com/public/243301e4-4cc5-4e52-a6e7-8cfe9208398d/r).\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
@ -405,97 +402,131 @@
|
||||
"source": [
|
||||
"#### Stateful management of chat history\n",
|
||||
"\n",
|
||||
"Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"For this we can use:\n",
|
||||
"This section of the tutorial previously used the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) abstraction. You can access that version of the documentation in the [v0.2 docs](https://python.langchain.com/v0.2/docs/tutorials/chatbot/).\n",
|
||||
"\n",
|
||||
"- [BaseChatMessageHistory](https://python.langchain.com/api_reference/langchain/index.html#module-langchain.memory): Store chat history.\n",
|
||||
"- [RunnableWithMessageHistory](/docs/how_to/message_history): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n",
|
||||
"As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into new LangChain applications.\n",
|
||||
"\n",
|
||||
"For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n",
|
||||
"If your code is already relying on `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do **not** need to make any changes. We do not plan on deprecating this functionality in the near future as it works for simple chat applications and any code that uses `RunnableWithMessageHistory` will continue to work as expected.\n",
|
||||
"\n",
|
||||
"Below, we implement a simple example of the second option, in which chat histories are stored in a simple dict. LangChain manages memory integrations with [Redis](/docs/integrations/memory/redis_chat_message_history/) and other technologies to provide for more robust persistence.\n",
|
||||
"Please see [How to migrate to LangGraph Memory](/docs/versions/migrating_memory/) for more details.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Instances of `RunnableWithMessageHistory` manage the chat history for you. They accept a config with a key (`\"session_id\"` by default) that specifies what conversation history to fetch and prepend to the input, and append the output to the same conversation history. Below is an example:"
|
||||
"We have added application logic for incorporating chat history, but we are still manually plumbing it through our application. In production, the Q&A application will usually persist the chat history into a database, and be able to read and update it appropriately.\n",
|
||||
"\n",
|
||||
"[LangGraph](https://langchain-ai.github.io/langgraph/) implements a built-in [persistence layer](https://langchain-ai.github.io/langgraph/concepts/persistence/), making it ideal for chat applications that support multiple conversational turns.\n",
|
||||
"\n",
|
||||
"Wrapping our chat model in a minimal LangGraph application allows us to automatically persist the message history, simplifying the development of multi-turn applications.\n",
|
||||
"\n",
|
||||
"LangGraph comes with a simple in-memory checkpointer, which we use below. See its [documentation](https://langchain-ai.github.io/langgraph/concepts/persistence/) for more detail, including how to use different persistence backends (e.g., SQLite or Postgres).\n",
|
||||
"\n",
|
||||
"For a detailed walkthrough of how to manage message history, head to the [How to add message history (memory)](/docs/how_to/message_history) guide."
|
||||
]
|
||||
},
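On the different persistence backends: a sketch of swapping the in-memory checkpointer for a SQLite-backed one. This assumes the separate `langgraph-checkpoint-sqlite` package is installed; the `MemorySaver`-based cell below needs no extra install.

```python
import sqlite3

from langgraph.checkpoint.sqlite import SqliteSaver

# Reuses the `workflow` graph built in the next cell; state now survives
# process restarts because it lives in a SQLite file.
conn = sqlite3.connect("chat_history.db", check_same_thread=False)
memory = SqliteSaver(conn)
app = workflow.compile(checkpointer=memory)
```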
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "9c3fb176-8d6a-4dc7-8408-6a22c5f7cc72",
|
||||
"execution_count": 8,
|
||||
"id": "817f8528-ead4-47cd-a4b8-7a1cb8a6641f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
|
||||
"from langchain_core.chat_history import BaseChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from typing import Sequence\n",
|
||||
"\n",
|
||||
"store = {}\n",
|
||||
"from langchain_core.messages import BaseMessage\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, StateGraph\n",
|
||||
"from langgraph.graph.message import add_messages\n",
|
||||
"from typing_extensions import Annotated, TypedDict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = ChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"# We define a dict representing the state of the application.\n",
|
||||
"# This state has the same input and output keys as `rag_chain`.\n",
|
||||
"class State(TypedDict):\n",
|
||||
" input: str\n",
|
||||
" chat_history: Annotated[Sequence[BaseMessage], add_messages]\n",
|
||||
" context: str\n",
|
||||
" answer: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"conversational_rag_chain = RunnableWithMessageHistory(\n",
|
||||
" rag_chain,\n",
|
||||
" get_session_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
" output_messages_key=\"answer\",\n",
|
||||
")"
|
||||
"# We then define a simple node that runs the `rag_chain`.\n",
|
||||
"# The `return` values of the node update the graph state, so here we just\n",
|
||||
"# update the chat history with the input message and response.\n",
|
||||
"def call_model(state: State):\n",
|
||||
" response = rag_chain.invoke(state)\n",
|
||||
" return {\n",
|
||||
" \"chat_history\": [\n",
|
||||
" HumanMessage(state[\"input\"]),\n",
|
||||
" AIMessage(response[\"answer\"]),\n",
|
||||
" ],\n",
|
||||
" \"context\": response[\"context\"],\n",
|
||||
" \"answer\": response[\"answer\"],\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Our graph consists only of one node:\n",
|
||||
"workflow = StateGraph(state_schema=State)\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"\n",
|
||||
"# Finally, we compile the graph with a checkpointer object.\n",
|
||||
"# This persists the state, in this case in memory.\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"app = workflow.compile(checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6bda388e-c794-4ca5-b96f-0b12f1daaca3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This application out-of-the-box supports multiple conversation threads. We pass in a configuration `dict` specifying a unique identifier for a thread to control what thread is run. This enables the application to support interactions with multiple users."
|
||||
]
|
||||
},
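For example, two `thread_id`s keep two fully independent histories. A short sketch reusing the `app` compiled above (the thread ids and second question are arbitrary):

```python
config_a = {"configurable": {"thread_id": "user-a"}}
config_b = {"configurable": {"thread_id": "user-b"}}

# Each thread accumulates its own chat history.
app.invoke({"input": "What is Task Decomposition?"}, config=config_a)
app.invoke({"input": "What is self-reflection?"}, config=config_b)

# Inspecting state per thread shows two separate conversations.
print(len(app.get_state(config_a).values["chat_history"]))  # 2
print(len(app.get_state(config_b).values["chat_history"]))  # 2
```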
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "1046c92f-21b3-4214-907d-92878d8cba23",
|
||||
"execution_count": 9,
|
||||
"id": "efdd4bcd-4de8-4d9a-8f95-4dd6960efc0a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. This process allows agents to plan ahead and tackle intricate tasks effectively.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and provides insight into the model's reasoning.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversational_rag_chain.invoke(\n",
|
||||
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
|
||||
"\n",
|
||||
"result = app.invoke(\n",
|
||||
" {\"input\": \"What is Task Decomposition?\"},\n",
|
||||
" config={\n",
|
||||
" \"configurable\": {\"session_id\": \"abc123\"}\n",
|
||||
" }, # constructs a key \"abc123\" in `store`.\n",
|
||||
")[\"answer\"]"
|
||||
" config=config,\n",
|
||||
")\n",
|
||||
"print(result[\"answer\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "0e89c75f-7ad7-4331-a2fe-57579eb8f840",
|
||||
"execution_count": 10,
|
||||
"id": "8ef6aefc-fe0e-457f-b552-303a45f47342",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"One way of doing task decomposition is by using simple prompting, such as asking the model, \"What are the subgoals for achieving XYZ?\" This method encourages the model to identify and outline the smaller tasks needed to accomplish the larger goal.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversational_rag_chain.invoke(\n",
|
||||
" {\"input\": \"What are common ways of doing it?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
|
||||
")[\"answer\"]"
|
||||
"result = app.invoke(\n",
|
||||
" {\"input\": \"What is one way of doing it?\"},\n",
|
||||
" config=config,\n",
|
||||
")\n",
|
||||
"print(result[\"answer\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -503,38 +534,38 @@
|
||||
"id": "3ab59258-84bc-4904-880e-2ebfebbca563",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The conversation history can be inspected in the `store` dict:"
|
||||
"The conversation history can be inspected via the state of the application:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "7686b874-3a85-499f-82b5-28a85c4c768c",
|
||||
"execution_count": 11,
|
||||
"id": "eddfde25-6fac-4ba2-b52f-0682c73b9c15",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"User: What is Task Decomposition?\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"AI: Task decomposition involves breaking down complex tasks into smaller and simpler steps to make them more manageable. Techniques like Chain of Thought (CoT) and Tree of Thoughts help models decompose hard tasks into multiple manageable subtasks. This process allows agents to plan ahead and tackle intricate tasks effectively.\n",
|
||||
"What is Task Decomposition?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"User: What are common ways of doing it?\n",
|
||||
"Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and provides insight into the model's reasoning.\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"AI: Task decomposition can be achieved through various methods such as using Language Model (LLM) with simple prompting, task-specific instructions tailored to the specific task at hand, or incorporating human inputs to break down the task into smaller components. These approaches help in guiding agents to think step by step and decompose complex tasks into more manageable subgoals.\n",
|
||||
"\n"
|
||||
"What is one way of doing it?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"One way of doing task decomposition is by using simple prompting, such as asking the model, \"What are the subgoals for achieving XYZ?\" This method encourages the model to identify and outline the smaller tasks needed to accomplish the larger goal.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for message in store[\"abc123\"].messages:\n",
|
||||
" if isinstance(message, AIMessage):\n",
|
||||
" prefix = \"AI\"\n",
|
||||
" else:\n",
|
||||
" prefix = \"User\"\n",
|
||||
"\n",
|
||||
" print(f\"{prefix}: {message.content}\\n\")"
|
||||
"chat_history = app.get_state(config).values[\"chat_history\"]\n",
|
||||
"for message in chat_history:\n",
|
||||
" message.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -557,24 +588,28 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 12,
|
||||
"id": "71c32048-1a41-465f-a9e2-c4affc332fd9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Sequence\n",
|
||||
"\n",
|
||||
"import bs4\n",
|
||||
"from langchain.chains import create_history_aware_retriever, create_retrieval_chain\n",
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"from langchain_community.chat_message_histories import ChatMessageHistory\n",
|
||||
"from langchain_community.document_loaders import WebBaseLoader\n",
|
||||
"from langchain_core.chat_history import BaseChatMessageHistory\n",
|
||||
"from langchain_core.messages import AIMessage, BaseMessage, HumanMessage\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, StateGraph\n",
|
||||
"from langgraph.graph.message import add_messages\n",
|
||||
"from typing_extensions import Annotated, TypedDict\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Construct retriever ###\n",
|
||||
@ -639,72 +674,77 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"### Statefully manage chat history ###\n",
|
||||
"store = {}\n",
|
||||
"class State(TypedDict):\n",
|
||||
" input: str\n",
|
||||
" chat_history: Annotated[Sequence[BaseMessage], add_messages]\n",
|
||||
" context: str\n",
|
||||
" answer: str\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = ChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"def call_model(state: State):\n",
|
||||
" response = rag_chain.invoke(state)\n",
|
||||
" return {\n",
|
||||
" \"chat_history\": [\n",
|
||||
" HumanMessage(state[\"input\"]),\n",
|
||||
" AIMessage(response[\"answer\"]),\n",
|
||||
" ],\n",
|
||||
" \"context\": response[\"context\"],\n",
|
||||
" \"answer\": response[\"answer\"],\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"conversational_rag_chain = RunnableWithMessageHistory(\n",
|
||||
" rag_chain,\n",
|
||||
" get_session_history,\n",
|
||||
" input_messages_key=\"input\",\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
" output_messages_key=\"answer\",\n",
|
||||
")"
|
||||
"workflow = StateGraph(state_schema=State)\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"app = workflow.compile(checkpointer=memory)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"execution_count": 13,
|
||||
"id": "6d0a7a73-d151-47d9-9e99-b4f3291c0322",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks to facilitate problem-solving. Different methods like Chain of Thought and Tree of Thoughts can be employed to decompose tasks effectively.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Task decomposition is the process of breaking down a complicated task into smaller, more manageable steps. Techniques like Chain of Thought (CoT) and Tree of Thoughts enhance this process by guiding models to think step by step and explore multiple reasoning possibilities. This approach helps in simplifying complex tasks and improving the model's performance.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversational_rag_chain.invoke(\n",
|
||||
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
|
||||
"\n",
|
||||
"result = app.invoke(\n",
|
||||
" {\"input\": \"What is Task Decomposition?\"},\n",
|
||||
" config={\n",
|
||||
" \"configurable\": {\"session_id\": \"abc123\"}\n",
|
||||
" }, # constructs a key \"abc123\" in `store`.\n",
|
||||
")[\"answer\"]"
|
||||
" config=config,\n",
|
||||
")\n",
|
||||
"print(result[\"answer\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"execution_count": 14,
|
||||
"id": "17021822-896a-4513-a17d-1d20b1c5381c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Task decomposition can be achieved through various methods such as using prompting techniques like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\", providing task-specific instructions like \"Write a story outline,\" or incorporating human inputs to break down complex tasks into smaller components. These approaches help in organizing thoughts and planning ahead for successful task completion.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"One way of doing task decomposition is by using simple prompting, such as asking the model, \"What are the subgoals for achieving XYZ?\" This method encourages the model to identify and outline the smaller steps needed to complete the larger task.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversational_rag_chain.invoke(\n",
|
||||
" {\"input\": \"What are common ways of doing it?\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
|
||||
")[\"answer\"]"
|
||||
"result = app.invoke(\n",
|
||||
" {\"input\": \"What is one way of doing it?\"},\n",
|
||||
" config=config,\n",
|
||||
")\n",
|
||||
"print(result[\"answer\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -726,7 +766,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"execution_count": 15,
|
||||
"id": "809cc747-2135-40a2-8e73-e4556343ee64",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -751,17 +791,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "931c4fe3-c603-4efb-9b37-5f7cbbb1cbbd",
|
||||
"execution_count": 16,
|
||||
"id": "1c8df9d7-6a74-471c-aaef-6c4819ee0cd0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.'"
|
||||
"'Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\\n\\nFig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\\nThe system comprises of 4 stages:\\n(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\\nInstruction:'"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -783,7 +823,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"execution_count": 17,
|
||||
"id": "1726d151-4653-4c72-a187-a14840add526",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@ -803,38 +843,70 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"execution_count": 18,
|
||||
"id": "170403a2-c914-41db-85d8-a2c381da112d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 1a50f4da-34a7-44af-8cbb-c67c90c9619e, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 68, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dddbe2d2-2355-4ca5-9961-1ceb39d78cf9-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_1ZkTWsLYIlKZ1uMyIQGUuyJx'}])]}}\n",
|
||||
"----\n",
|
||||
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_1ZkTWsLYIlKZ1uMyIQGUuyJx')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. This approach helps in managing and solving difficult tasks by dividing them into more manageable components. One common method of task decomposition is the Chain of Thought (CoT) technique, where models are instructed to think step by step to decompose hard tasks into smaller steps. Another extension of CoT is the Tree of Thoughts, which explores multiple reasoning possibilities at each step and generates multiple thoughts per step, creating a tree structure. Task decomposition can be facilitated by using simple prompts, task-specific instructions, or human inputs.', response_metadata={'token_usage': {'completion_tokens': 119, 'prompt_tokens': 636, 'total_tokens': 755}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-4a701854-97f2-4ec2-b6e1-73410911fa72-0')]}}\n",
|
||||
"----\n"
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What is Task Decomposition?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" blog_post_retriever (call_WKHdiejvg4In982Hr3EympuI)\n",
|
||||
" Call ID: call_WKHdiejvg4In982Hr3EympuI\n",
|
||||
" Args:\n",
|
||||
" query: Task Decomposition\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: blog_post_retriever\n",
|
||||
"\n",
|
||||
"Fig. 1. Overview of a LLM-powered autonomous agent system.\n",
|
||||
"Component One: Planning#\n",
|
||||
"A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n",
|
||||
"Task Decomposition#\n",
|
||||
"Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n",
|
||||
"\n",
|
||||
"Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n",
|
||||
"Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n",
|
||||
"\n",
|
||||
"(3) Task execution: Expert models execute on the specific tasks and log results.\n",
|
||||
"Instruction:\n",
|
||||
"\n",
|
||||
"With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n",
|
||||
"\n",
|
||||
"Fig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\n",
|
||||
"The system comprises of 4 stages:\n",
|
||||
"(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\n",
|
||||
"Instruction:\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Task Decomposition is a process used in complex problem-solving where a larger task is broken down into smaller, more manageable sub-tasks. This approach enhances the ability of models, particularly large language models (LLMs), to handle intricate tasks by allowing them to think step by step.\n",
|
||||
"\n",
|
||||
"There are several methods for task decomposition:\n",
|
||||
"\n",
|
||||
"1. **Chain of Thought (CoT)**: This technique encourages the model to articulate its reasoning process by thinking through the task in a sequential manner. It transforms a big task into smaller, manageable steps, which also provides insight into the model's thought process.\n",
|
||||
"\n",
|
||||
"2. **Tree of Thoughts**: An extension of CoT, this method explores multiple reasoning possibilities at each step. It decomposes the problem into various thought steps and generates multiple thoughts for each step, creating a tree structure. The evaluation of each state can be done using breadth-first search (BFS) or depth-first search (DFS).\n",
|
||||
"\n",
|
||||
"3. **Prompting Techniques**: Task decomposition can be achieved through simple prompts like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\" Additionally, task-specific instructions can guide the model, such as asking it to \"Write a story outline\" for creative tasks.\n",
|
||||
"\n",
|
||||
"4. **Human Inputs**: In some cases, human guidance can be used to assist in breaking down tasks.\n",
|
||||
"\n",
|
||||
"Overall, task decomposition is a crucial component in planning and executing complex tasks, allowing for better organization and clarity in the problem-solving process.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is Task Decomposition?\"\n",
|
||||
"\n",
|
||||
"for s in agent_executor.stream(\n",
|
||||
"for event in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=query)]},\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" print(s)\n",
|
||||
" print(\"----\")"
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -842,12 +914,12 @@
|
||||
"id": "1df703b1-aad6-48fb-b6fa-703e32ea88b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"LangGraph comes with built in persistence, so we don't need to use ChatMessageHistory! Rather, we can pass in a checkpointer to our LangGraph agent directly"
|
||||
"We can again take advantage of LangGraph's built-in persistence to save stateful updates to memory:"
|
||||
]
|
||||
},
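As an aside for readers skimming this hunk: the elided cells pass a checkpointer straight into the prebuilt agent. A minimal, self-contained sketch of that pattern follows; the `echo` tool is a hypothetical stand-in for the notebook's `blog_post_retriever` tool, not code from this PR:

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent


@tool
def echo(text: str) -> str:
    """Echo the input back (stand-in for the notebook's retriever tool)."""
    return text


memory = MemorySaver()  # in-memory checkpointer; conversations are keyed by thread_id
llm = ChatOpenAI(model="gpt-4o-mini")
agent_executor = create_react_agent(llm, [echo], checkpointer=memory)
```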
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 19,
|
||||
"id": "04a3a664-3c3f-4cd1-9995-26662a52da7c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -871,7 +943,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"execution_count": 20,
|
||||
"id": "d6d70833-b958-4cd7-9e27-29c1c08bb1b8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -879,19 +951,24 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='Hello Bob! How can I assist you today?', response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 67, 'total_tokens': 78}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-022806f0-eb26-4c87-9132-ed2fcc6c21ea-0')]}}\n",
|
||||
"----\n"
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"Hi! I'm bob\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Hello Bob! How can I assist you today?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
|
||||
"\n",
|
||||
"for s in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"Hi! I'm bob\")]}, config=config\n",
|
||||
"for event in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=\"Hi! I'm bob\")]},\n",
|
||||
" config=config,\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" print(s)\n",
|
||||
" print(\"----\")"
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -904,7 +981,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": 21,
|
||||
"id": "e2c570ae-dd91-402c-8693-ae746de63b16",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -912,34 +989,64 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_DdAAJJgGIQOZQgKVE4duDyML', 'function': {'arguments': '{\"query\":\"Task Decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 91, 'total_tokens': 110}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-acc3c903-4f6f-48dd-8b36-f6f3b80d0856-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'Task Decomposition'}, 'id': 'call_DdAAJJgGIQOZQgKVE4duDyML'}])]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in LangChainTracer.on_tool_end callback: TracerException(\"Found chain run at ID 9a7ba580-ec91-412d-9649-1b5cbf5ae7bc, but expected {'tool'} run.\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'tools': {'messages': [ToolMessage(content='Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.', name='blog_post_retriever', tool_call_id='call_DdAAJJgGIQOZQgKVE4duDyML')]}}\n",
|
||||
"----\n"
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What is Task Decomposition?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" blog_post_retriever (call_0rhrUJiHkoOQxwqCpKTkSkiu)\n",
|
||||
" Call ID: call_0rhrUJiHkoOQxwqCpKTkSkiu\n",
|
||||
" Args:\n",
|
||||
" query: Task Decomposition\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: blog_post_retriever\n",
|
||||
"\n",
|
||||
"Fig. 1. Overview of a LLM-powered autonomous agent system.\n",
|
||||
"Component One: Planning#\n",
|
||||
"A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n",
|
||||
"Task Decomposition#\n",
|
||||
"Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n",
|
||||
"\n",
|
||||
"Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n",
|
||||
"Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n",
|
||||
"\n",
|
||||
"(3) Task execution: Expert models execute on the specific tasks and log results.\n",
|
||||
"Instruction:\n",
|
||||
"\n",
|
||||
"With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n",
|
||||
"\n",
|
||||
"Fig. 11. Illustration of how HuggingGPT works. (Image source: Shen et al. 2023)\n",
|
||||
"The system comprises of 4 stages:\n",
|
||||
"(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.\n",
|
||||
"Instruction:\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Task Decomposition is a technique used to break down complex tasks into smaller, more manageable steps. This approach is particularly useful in the context of autonomous agents and large language models (LLMs). Here are some key points about Task Decomposition:\n",
|
||||
"\n",
|
||||
"1. **Chain of Thought (CoT)**: This is a prompting technique that encourages the model to \"think step by step.\" By doing so, it can utilize more computational resources to decompose difficult tasks into simpler ones, making them easier to handle.\n",
|
||||
"\n",
|
||||
"2. **Tree of Thoughts**: An extension of CoT, this method explores multiple reasoning possibilities at each step. It decomposes a problem into various thought steps and generates multiple thoughts for each step, creating a tree structure. This can be evaluated using search methods like breadth-first search (BFS) or depth-first search (DFS).\n",
|
||||
"\n",
|
||||
"3. **Methods of Decomposition**: Task decomposition can be achieved through:\n",
|
||||
" - Simple prompting (e.g., asking for steps to achieve a goal).\n",
|
||||
" - Task-specific instructions (e.g., requesting a story outline for writing).\n",
|
||||
" - Human inputs to guide the decomposition process.\n",
|
||||
"\n",
|
||||
"4. **Execution**: After decomposition, expert models execute the specific tasks and log the results, allowing for a structured approach to complex problem-solving.\n",
|
||||
"\n",
|
||||
"Overall, Task Decomposition enhances the model's ability to tackle intricate tasks by breaking them down into simpler, actionable components.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is Task Decomposition?\"\n",
|
||||
"\n",
|
||||
"for s in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=query)]}, config=config\n",
|
||||
"for event in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=query)]},\n",
|
||||
" config=config,\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" print(s)\n",
|
||||
" print(\"----\")"
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -954,7 +1061,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 22,
|
||||
"id": "570d8c68-136e-4ba5-969a-03ba195f6118",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -962,23 +1069,66 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7', 'function': {'arguments': '{\"query\":\"common ways of task decomposition\"}', 'name': 'blog_post_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 930, 'total_tokens': 951}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-dd842071-6dbd-4b68-8657-892eaca58638-0', tool_calls=[{'name': 'blog_post_retriever', 'args': {'query': 'common ways of task decomposition'}, 'id': 'call_KvoiamnLfGEzMeEMlV3u0TJ7'}])]}}\n",
|
||||
"----\n",
|
||||
"{'action': {'messages': [ToolMessage(content='Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\\nTask decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\\n\\nFig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\n\\nResources:\\n1. Internet access for searches and information gathering.\\n2. Long Term memory management.\\n3. GPT-3.5 powered Agents for delegation of simple tasks.\\n4. File output.\\n\\nPerformance Evaluation:\\n1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\\n2. Constructively self-criticize your big-picture behavior constantly.\\n3. Reflect on past decisions and strategies to refine your approach.\\n4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\\n\\n(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user\\'s request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.', name='blog_post_retriever', id='c749bb8e-c8e0-4fa3-bc11-3e2e0651880b', tool_call_id='call_KvoiamnLfGEzMeEMlV3u0TJ7')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='According to the blog post, common ways of task decomposition include:\\n\\n1. Using language models with simple prompting like \"Steps for XYZ\" or \"What are the subgoals for achieving XYZ?\"\\n2. Utilizing task-specific instructions, for example, using \"Write a story outline\" for writing a novel.\\n3. Involving human inputs in the task decomposition process.\\n\\nThese methods help in breaking down complex tasks into smaller and more manageable steps, facilitating better planning and execution of the overall task.', response_metadata={'token_usage': {'completion_tokens': 100, 'prompt_tokens': 1475, 'total_tokens': 1575}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': 'fp_3b956da36b', 'finish_reason': 'stop', 'logprobs': None}, id='run-98b765b3-f1a6-4c9a-ad0f-2db7950b900f-0')]}}\n",
|
||||
"----\n"
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What according to the blog post are common ways of doing it? redo the search\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" blog_post_retriever (call_bZRDF6Xr0QdurM9LItM8cN7a)\n",
|
||||
" Call ID: call_bZRDF6Xr0QdurM9LItM8cN7a\n",
|
||||
" Args:\n",
|
||||
" query: common ways of Task Decomposition\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: blog_post_retriever\n",
|
||||
"\n",
|
||||
"Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\n",
|
||||
"Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.\n",
|
||||
"\n",
|
||||
"Fig. 1. Overview of a LLM-powered autonomous agent system.\n",
|
||||
"Component One: Planning#\n",
|
||||
"A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\n",
|
||||
"Task Decomposition#\n",
|
||||
"Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\n",
|
||||
"\n",
|
||||
"Resources:\n",
|
||||
"1. Internet access for searches and information gathering.\n",
|
||||
"2. Long Term memory management.\n",
|
||||
"3. GPT-3.5 powered Agents for delegation of simple tasks.\n",
|
||||
"4. File output.\n",
|
||||
"\n",
|
||||
"Performance Evaluation:\n",
|
||||
"1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n",
|
||||
"2. Constructively self-criticize your big-picture behavior constantly.\n",
|
||||
"3. Reflect on past decisions and strategies to refine your approach.\n",
|
||||
"4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n",
|
||||
"\n",
|
||||
"(3) Task execution: Expert models execute on the specific tasks and log results.\n",
|
||||
"Instruction:\n",
|
||||
"\n",
|
||||
"With the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. If inference results contain a file path, must tell the user the complete file path.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"According to the blog post, common ways to perform Task Decomposition include:\n",
|
||||
"\n",
|
||||
"1. **Simple Prompting**: Using straightforward prompts such as \"Steps for XYZ.\\n1.\" or \"What are the subgoals for achieving XYZ?\" to guide the model in breaking down the task.\n",
|
||||
"\n",
|
||||
"2. **Task-Specific Instructions**: Providing specific instructions tailored to the task at hand, such as asking for a \"story outline\" when writing a novel.\n",
|
||||
"\n",
|
||||
"3. **Human Inputs**: Involving human guidance or input to assist in the decomposition process, allowing for a more nuanced understanding of the task requirements.\n",
|
||||
"\n",
|
||||
"These methods help in transforming complex tasks into smaller, manageable components, facilitating better planning and execution.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What according to the blog post are common ways of doing it? redo the search\"\n",
|
||||
"\n",
|
||||
"for s in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=query)]}, config=config\n",
|
||||
"for event in agent_executor.stream(\n",
|
||||
" {\"messages\": [HumanMessage(content=query)]},\n",
|
||||
" config=config,\n",
|
||||
" stream_mode=\"values\",\n",
|
||||
"):\n",
|
||||
" print(s)\n",
|
||||
" print(\"----\")"
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1001,7 +1151,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "b1d2b4d4-e604-497d-873d-d345b808578e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -1016,7 +1166,7 @@
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Construct retriever ###\n",
|
||||
@@ -1064,7 +1214,7 @@
|
||||
"\n",
|
||||
"To explore different types of retrievers and retrieval strategies, visit the [retrievers](/docs/how_to/#retrievers) section of the how-to guides.\n",
|
||||
"\n",
|
||||
"For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) LCEL page.\n",
|
||||
"For a detailed walkthrough of LangChain's conversation memory abstractions, visit the [How to add message history (memory)](/docs/how_to/message_history) guide.\n",
|
||||
"\n",
|
||||
"To learn more about agents, head to the [Agents Modules](/docs/tutorials/agents)."
|
||||
]
|
||||
@@ -1094,7 +1244,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
@@ -9,13 +9,13 @@
|
||||
"\n",
|
||||
"[`ConversationChain`](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.conversation.base.ConversationChain.html) incorporated a memory of previous messages to sustain a stateful conversation.\n",
|
||||
"\n",
|
||||
"Some advantages of switching to the LCEL implementation are:\n",
|
||||
"Some advantages of switching to the Langgraph implementation are:\n",
|
||||
"\n",
|
||||
"- Innate support for threads/separate sessions. To make this work with `ConversationChain`, you'd need to instantiate a separate memory class outside the chain.\n",
|
||||
"- More explicit parameters. `ConversationChain` contains a hidden default prompt, which can cause confusion.\n",
|
||||
"- Streaming support. `ConversationChain` only supports streaming via callbacks.\n",
|
||||
"\n",
|
||||
"`RunnableWithMessageHistory` implements sessions via configuration parameters. It should be instantiated with a callable that returns a [chat message history](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html). By default, it expects this function to take a single argument `session_id`."
|
||||
"Langgraph's [checkpointing](https://langchain-ai.github.io/langgraph/how-tos/persistence/) system supports multiple threads or sessions, which can be specified via the `\"thread_id\"` key in its configuration parameters."
|
||||
]
|
||||
},
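To illustrate the thread-scoped configuration described in the cell above, a short sketch (assuming the compiled `app` built in the cells below; not an executed cell from this notebook):

```python
# Each thread_id keys an independent conversation in the checkpointer
config_a = {"configurable": {"thread_id": "session-a"}}
config_b = {"configurable": {"thread_id": "session-b"}}

app.invoke({"messages": [("user", "Hi, I'm Bob")]}, config_a)
app.invoke({"messages": [("user", "What's my name?")]}, config_a)  # recalls "Bob"
app.invoke({"messages": [("user", "What's my name?")]}, config_b)  # fresh thread, no memory
```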
|
||||
{
|
||||
@@ -61,9 +61,9 @@
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'how are you?',\n",
|
||||
"{'input': \"I'm Bob, how are you?\",\n",
|
||||
" 'history': '',\n",
|
||||
" 'response': \"Arr matey, I be doin' well on the high seas, plunderin' and pillagin' as usual. How be ye?\"}"
|
||||
" 'response': \"Arrr matey, I be a pirate sailin' the high seas. What be yer business with me?\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
@@ -93,7 +93,30 @@
|
||||
" prompt=prompt,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain({\"input\": \"how are you?\"})"
|
||||
"chain({\"input\": \"I'm Bob, how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "53f2c723-178f-470a-8147-54e7cb982211",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'input': 'What is my name?',\n",
|
||||
" 'history': \"Human: I'm Bob, how are you?\\nAI: Arrr matey, I be a pirate sailin' the high seas. What be yer business with me?\",\n",
|
||||
" 'response': 'Your name be Bob, matey.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain({\"input\": \"What is my name?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -103,111 +126,110 @@
|
||||
"source": [
|
||||
"</details>\n",
|
||||
"\n",
|
||||
"## LCEL\n",
|
||||
"## Langgraph\n",
|
||||
"\n",
|
||||
"<details open>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "666c92a0-b555-4418-a465-6490c1b92570",
|
||||
"execution_count": 4,
|
||||
"id": "a59b910c-0d02-41aa-bc99-441f11989cf8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Arr, me matey! I be doin' well, sailin' the high seas and searchin' for treasure. How be ye?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, MessagesState, StateGraph\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are a pirate. Answer the following questions as best you can.\"),\n",
|
||||
" (\"placeholder\", \"{chat_history}\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o-mini\")\n",
|
||||
"\n",
|
||||
"history = InMemoryChatMessageHistory()\n",
|
||||
"# Define a new graph\n",
|
||||
"workflow = StateGraph(state_schema=MessagesState)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_history():\n",
|
||||
" return history\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state: MessagesState):\n",
|
||||
" response = model.invoke(state[\"messages\"])\n",
|
||||
" return {\"messages\": response}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"# Define the two nodes we will cycle between\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" get_history,\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")\n",
|
||||
"# Add memory\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"app = workflow.compile(checkpointer=memory)\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke({\"input\": \"how are you?\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b386ce6-895e-442c-88f3-7bec0ab9f401",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above example uses the same `history` for all sessions. The example below shows how to use a different chat history for each session."
|
||||
"\n",
|
||||
"# The thread id is a unique key that identifies\n",
|
||||
"# this particular conversation.\n",
|
||||
"# We'll just generate a random uuid here.\n",
|
||||
"thread_id = uuid.uuid4()\n",
|
||||
"config = {\"configurable\": {\"thread_id\": thread_id}}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "96152263-98d7-4e06-8c73-d0c0abf3e8e9",
|
||||
"execution_count": 5,
|
||||
"id": "3a9df4bb-e804-4373-9a15-a29dc0371595",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Ahoy there, me hearty! What can this old pirate do for ye today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"I'm Bob, how are you?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Ahoy, Bob! I be feelin' as lively as a ship in full sail! How be ye on this fine day?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.chat_history import BaseChatMessageHistory\n",
|
||||
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
|
||||
"query = \"I'm Bob, how are you?\"\n",
|
||||
"\n",
|
||||
"store = {}\n",
|
||||
"input_messages = [\n",
|
||||
" {\n",
|
||||
" \"role\": \"system\",\n",
|
||||
" \"content\": \"You are a pirate. Answer the following questions as best you can.\",\n",
|
||||
" },\n",
|
||||
" {\"role\": \"user\", \"content\": query},\n",
|
||||
"]\n",
|
||||
"for event in app.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "d3f77e69-fa3d-496c-968c-86371e1e8cf1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What is my name?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Ye be callin' yerself Bob, I reckon! A fine name for a swashbuckler like yerself!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is my name?\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
|
||||
" if session_id not in store:\n",
|
||||
" store[session_id] = InMemoryChatMessageHistory()\n",
|
||||
" return store[session_id]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = prompt | ChatOpenAI() | StrOutputParser()\n",
|
||||
"\n",
|
||||
"wrapped_chain = RunnableWithMessageHistory(\n",
|
||||
" chain,\n",
|
||||
" get_session_history,\n",
|
||||
" history_messages_key=\"chat_history\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"wrapped_chain.invoke(\n",
|
||||
" {\"input\": \"Hello!\"},\n",
|
||||
" config={\"configurable\": {\"session_id\": \"abc123\"}},\n",
|
||||
")"
|
||||
"input_messages = [{\"role\": \"user\", \"content\": query}]\n",
|
||||
"for event in app.stream({\"messages\": input_messages}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
300
docs/docs/versions/migrating_memory/chat_history.ipynb
Normal file
@@ -0,0 +1,300 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c298a5c9-b9af-481d-9eba-cbd65f987a8a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use BaseChatMessageHistory with LangGraph\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"* [Chat History](/docs/concepts/#chat-history)\n",
|
||||
"* [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html)\n",
|
||||
"* [LangGraph](https://langchain-ai.github.io/langgraph/concepts/high_level/)\n",
|
||||
"* [Memory](https://langchain-ai.github.io/langgraph/concepts/agentic_concepts/#memory)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"We recommend that new LangChain applications take advantage of the [built-in LangGraph peristence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to implement memory.\n",
|
||||
"\n",
|
||||
"In some situations, users may need to keep using an existing persistence solution for chat message history.\n",
|
||||
"\n",
|
||||
"Here, we will show how to use [LangChain chat message histories](https://python.langchain.com/docs/integrations/memory/) (implementations of [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html)) with LangGraph."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "548bc988-167b-43f1-860a-d247e28b2b42",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "6cbfd2ab-7537-4269-8249-646fa89bf016",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install --upgrade --quiet langchain-anthropic langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0694febf-dfa8-46ef-babc-f8b16b5a2926",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "c5e08659-b68c-48f2-8b33-e79b0c6999e1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ChatMessageHistory\n",
|
||||
"\n",
|
||||
"A message history needs to be parameterized by a conversation ID or maybe by the 2-tuple of (user ID, conversation ID).\n",
|
||||
"\n",
|
||||
"Many of the [LangChain chat message histories](https://python.langchain.com/docs/integrations/memory/) will have either a `session_id` or some `namespace` to allow keeping track of different conversations. Please refer to the specific implementations to check how it is parameterized.\n",
|
||||
"\n",
|
||||
"The built-in `InMemoryChatMessageHistory` does not contains such a parameterization, so we'll create a dictionary to keep track of the message histories."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "28049308-2543-48e6-90d0-37a88951a637",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
|
||||
"\n",
|
||||
"chats_by_session_id = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_chat_history(session_id: str) -> InMemoryChatMessageHistory:\n",
|
||||
" chat_history = chats_by_session_id.get(session_id)\n",
|
||||
" if chat_history is None:\n",
|
||||
" chat_history = InMemoryChatMessageHistory()\n",
|
||||
" chats_by_session_id[session_id] = chat_history\n",
|
||||
" return chat_history"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "94c53ce3-4212-41e6-8ad3-f0ab5df6130f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use with LangGraph\n",
|
||||
"\n",
|
||||
"Next, we'll set up a basic chat bot using LangGraph. If you're not familiar with LangGraph, you should look at the following [Quick Start Tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/).\n",
|
||||
"\n",
|
||||
"We'll create a [LangGraph node](https://langchain-ai.github.io/langgraph/concepts/low_level/#nodes) for the chat model, and manually manage the conversation history, taking into account the conversation ID passed as part of the RunnableConfig.\n",
|
||||
"\n",
|
||||
"The conversation ID can be passed as either part of the RunnableConfig (as we'll do here), or as part of the [graph state](https://langchain-ai.github.io/langgraph/concepts/low_level/#state)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a6633dd2-2d6a-4121-b087-4907c9f588ca",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"hi! I'm bob\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Hello Bob! It's nice to meet you. I'm Claude, an AI assistant created by Anthropic. How are you doing today?\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"what was my name?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"You introduced yourself as Bob when you said \"hi! I'm bob\".\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"from langchain_core.messages import BaseMessage, HumanMessage\n",
|
||||
"from langchain_core.runnables import RunnableConfig\n",
|
||||
"from langgraph.graph import START, MessagesState, StateGraph\n",
|
||||
"\n",
|
||||
"# Define a new graph\n",
|
||||
"builder = StateGraph(state_schema=MessagesState)\n",
|
||||
"\n",
|
||||
"# Define a chat model\n",
|
||||
"model = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state: MessagesState, config: RunnableConfig) -> list[BaseMessage]:\n",
|
||||
" # Make sure that config is populated with the session id\n",
|
||||
" if \"configurable\" not in config or \"session_id\" not in config[\"configurable\"]:\n",
|
||||
" raise ValueError(\n",
|
||||
" \"Make sure that the config includes the following information: {'configurable': {'session_id': 'some_value'}}\"\n",
|
||||
" )\n",
|
||||
" # Fetch the history of messages and append to it any new messages.\n",
|
||||
" # highlight-start\n",
|
||||
" chat_history = get_chat_history(config[\"configurable\"][\"session_id\"])\n",
|
||||
" messages = list(chat_history.messages) + state[\"messages\"]\n",
|
||||
" # highlight-end\n",
|
||||
" ai_message = model.invoke(messages)\n",
|
||||
" # Finally, update the chat message history to include\n",
|
||||
" # the new input message from the user together with the\n",
|
||||
" # repsonse from the model.\n",
|
||||
" # highlight-next-line\n",
|
||||
" chat_history.add_messages(state[\"messages\"] + [ai_message])\n",
|
||||
" return {\"messages\": ai_message}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the two nodes we will cycle between\n",
|
||||
"builder.add_edge(START, \"model\")\n",
|
||||
"builder.add_node(\"model\", call_model)\n",
|
||||
"\n",
|
||||
"graph = builder.compile()\n",
|
||||
"\n",
|
||||
"# Here, we'll create a unique session ID to identify the conversation\n",
|
||||
"session_id = uuid.uuid4()\n",
|
||||
"config = {\"configurable\": {\"session_id\": session_id}}\n",
|
||||
"\n",
|
||||
"input_message = HumanMessage(content=\"hi! I'm bob\")\n",
|
||||
"for event in graph.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()\n",
|
||||
"\n",
|
||||
"# Here, let's confirm that the AI remembers our name!\n",
|
||||
"input_message = HumanMessage(content=\"what was my name?\")\n",
|
||||
"for event in graph.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4c0766af-a3b3-4293-b253-3a10f365ab5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
":::hint\n",
|
||||
"\n",
|
||||
"This also supports streaming LLM content token by token if using langgraph >= 0.2.28.\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "044b63dd-fb15-4a03-89c5-aaaf7346ea76",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"You| sai|d your| name was Bob.|"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import AIMessageChunk\n",
|
||||
"\n",
|
||||
"first = True\n",
|
||||
"\n",
|
||||
"for msg, metadata in graph.stream(\n",
|
||||
" {\"messages\": input_message}, config, stream_mode=\"messages\"\n",
|
||||
"):\n",
|
||||
" if msg.content and not isinstance(msg, HumanMessage):\n",
|
||||
" print(msg.content, end=\"|\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "da0536dd-9a0b-49e3-b0b6-e8c7abf3b1f9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using With RunnableWithMessageHistory\n",
|
||||
"\n",
|
||||
"This how-to guide used the `messages` and `add_messages` interface of `BaseChatMessageHistory` directly. \n",
|
||||
"\n",
|
||||
"Alternatively, you can use [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html), as [LCEL](/docs/concepts/#langchain-expression-language-lcel/) can be used inside any [LangGraph node](https://langchain-ai.github.io/langgraph/concepts/low_level/#nodes).\n",
|
||||
"\n",
|
||||
"To do that replace the following code:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"def call_model(state: MessagesState, config: RunnableConfig) -> list[BaseMessage]:\n",
|
||||
" # highlight-start\n",
|
||||
" # Make sure that config is populated with the session id\n",
|
||||
" if \"configurable\" not in config or \"session_id\" not in config[\"configurable\"]:\n",
|
||||
" raise ValueError(\n",
|
||||
" \"You make sure that the config includes the following information: {'configurable': {'session_id': 'some_value'}}\"\n",
|
||||
" )\n",
|
||||
" # Fetch the history of messages and append to it any new messages.\n",
|
||||
" chat_history = get_chat_history(config[\"configurable\"][\"session_id\"])\n",
|
||||
" messages = list(chat_history.messages) + state[\"messages\"]\n",
|
||||
" ai_message = model.invoke(messages)\n",
|
||||
" # Finally, update the chat message history to include\n",
|
||||
" # the new input message from the user together with the\n",
|
||||
" # repsonse from the model.\n",
|
||||
" chat_history.add_messages(state[\"messages\"] + [ai_message])\n",
|
||||
" # hilight-end\n",
|
||||
" return {\"messages\": ai_message}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"With the corresponding instance of `RunnableWithMessageHistory` defined in your current application.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"runnable = RunnableWithMessageHistory(...) # From existing code\n",
|
||||
"\n",
|
||||
"def call_model(state: MessagesState, config: RunnableConfig) -> list[BaseMessage]:\n",
|
||||
" # RunnableWithMessageHistory takes care of reading the message history\n",
|
||||
" # and updating it with the new human message and ai response.\n",
|
||||
" ai_message = runnable.invoke(state['messages'], config)\n",
|
||||
" return {\n",
|
||||
" \"messages\": ai_message\n",
|
||||
" }\n",
|
||||
"```"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@@ -2,10 +2,31 @@
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# How to migrate from v0.0 memory
|
||||
# How to migrate to LangGraph memory
|
||||
|
||||
As of the v0.3 release of LangChain, we recommend that LangChain users take advantage of [LangGraph persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to incorporate `memory` into their LangChain application.
|
||||
|
||||
* Users that rely on `RunnableWithMessageHistory` or `BaseChatMessageHistory` do **not** need to make any changes, but are encouraged to consider using LangGraph for more complex use cases.
|
||||
* Users that rely on deprecated memory abstractions from LangChain 0.0.x should follow this guide to upgrade to the new LangGraph persistence feature in LangChain 0.3.x.
|
||||
|
||||
## Why use LangGraph for memory?
|
||||
|
||||
The main advantages of persistence in LangGraph are:
|
||||
|
||||
- Built-in support for multiple users and conversations, which is a typical requirement for real-world conversational AI applications.
|
||||
- Ability to save and resume complex conversations at any point. This helps with:
|
||||
- Error recovery
|
||||
- Allowing human intervention in AI workflows
|
||||
- Exploring different conversation paths ("time travel")
|
||||
- Full compatibility with both traditional [language models](/docs/concepts/#llms) and modern [chat models](/docs/concepts/#chat-models). Early memory implementations in LangChain weren't designed for newer chat model APIs, causing issues with features like tool-calling. LangGraph memory can persist any custom state.
|
||||
- Highly customizable, allowing you to fully control how memory works and use different storage backends (a minimal sketch follows this list).
|
||||
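To make the last two points concrete, here is a minimal sketch of persisting custom state alongside messages. It is not taken from any notebook in this PR, and the `summary` field is a hypothetical custom state entry:

```python
from typing import Annotated

from typing_extensions import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages


class State(TypedDict):
    messages: Annotated[list, add_messages]
    summary: str  # custom field, checkpointed alongside the messages


def node(state: State):
    # Anything returned here is persisted for the active thread
    return {"summary": f"{len(state['messages'])} messages so far"}


builder = StateGraph(State)
builder.add_node("node", node)
builder.add_edge(START, "node")
graph = builder.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": "thread-1"}}
graph.invoke({"messages": [("user", "hi")]}, config)
```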
|
||||
## Evolution of memory in LangChain
|
||||
|
||||
The concept of memory has evolved significantly in LangChain since its initial release.
|
||||
|
||||
### LangChain 0.0.x memory
|
||||
|
||||
Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases:
|
||||
|
||||
| Use Case | Example |
|
||||
@@ -16,16 +37,27 @@ Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases
|
||||
|
||||
While the LangChain 0.0.x memory abstractions were useful, they were limited in their capabilities and not well suited for real-world conversational AI applications. These memory abstractions lacked built-in support for multi-user, multi-conversation scenarios, which are essential for practical conversational AI systems.
|
||||
|
||||
This guide will help you migrate your usage of memory implementations from LangChain v0.0.x to the persistence implementations of LangGraph.
|
||||
Most of these implementations have been officially deprecated in LangChain 0.3.x in favor of LangGraph persistence.
|
||||
|
||||
## Why use LangGraph for memory?
|
||||
### RunnableWithMessageHistory and BaseChatMessageHistory
|
||||
|
||||
The main advantages of persistence implementation in LangGraph are:
|
||||
:::note
|
||||
Please see [How to use BaseChatMessageHistory with LangGraph](./chat_history) if you would like to use `BaseChatMessageHistory` (with or without `RunnableWithMessageHistory`) in LangGraph.
|
||||
:::
|
||||
|
||||
- Built-in support for multi-user, multi-conversation scenarios which is often a requirement for real-world conversational AI applications.
|
||||
- Ability to save and resume complex state at any time for error recovery, human-in-the-loop workflows, time travel interactions, and more.
|
||||
- Full support for both [LLM](/docs/concepts/#llms) and [chat models](/docs/concepts/#chat-models). In contrast, the v0.0.x memory abstractions were created prior to the existence and widespread adoption of chat model APIs, and so it does not work well with chat models (e.g., fails with tool calling chat models).
|
||||
- Offers a high degree of customization and control over the memory implementation, including the ability to use different backends.
|
||||
As of LangChain v0.1, we started recommending that users rely primarily on [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html). `BaseChatMessageHistory` serves
|
||||
as a simple persistence layer for storing and retrieving messages in a conversation.
|
||||
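For example, a minimal sketch of that interface, using the in-memory implementation:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()
history.add_messages([HumanMessage(content="hi!"), AIMessage(content="hello")])
history.messages  # -> [HumanMessage(content='hi!'), AIMessage(content='hello')]
```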
|
||||
At that time, the only option for orchestrating LangChain chains was via [LCEL](https://python.langchain.com/docs/how_to/#langchain-expression-language-lcel). To incorporate memory with `LCEL`, users had to use the [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) interface. While sufficient for basic chat applications, many users found the API unintuitive and challenging to use.
|
||||
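For context, that interface is the one shown in the notebook diff earlier in this commit, condensed here into a sketch:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI

store = {}


def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


prompt = ChatPromptTemplate.from_messages(
    [("placeholder", "{chat_history}"), ("human", "{input}")]
)
chain = prompt | ChatOpenAI()

wrapped_chain = RunnableWithMessageHistory(
    chain,
    get_session_history,
    history_messages_key="chat_history",
)

wrapped_chain.invoke(
    {"input": "Hello!"},
    config={"configurable": {"session_id": "abc123"}},
)
```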
|
||||
As of LangChain v0.3, we recommend that **new** code take advantage of LangGraph for both orchestration and persistence:

- Orchestration: In LangGraph, users define [graphs](https://langchain-ai.github.io/langgraph/concepts/low_level/) that specify the flow of the application. This allows users to keep using `LCEL` within individual nodes when `LCEL` is needed, while making it easy to define complex orchestration logic that is more readable and maintainable.
- Persistence: Users can rely on LangGraph's [persistence](https://langchain-ai.github.io/langgraph/concepts/persistence/) to store and retrieve data. LangGraph persistence is extremely flexible and can support a much wider range of use cases than the `RunnableWithMessageHistory` interface.

:::important

If you have been using `RunnableWithMessageHistory` or `BaseChatMessageHistory`, you do not need to make any changes. We do not plan on deprecating either functionality in the near future. This functionality is sufficient for simple chat applications, and any code that uses `RunnableWithMessageHistory` will continue to work as expected.

:::

## Migrations

@@ -45,19 +77,21 @@ Often this involves trimming and / or summarizing the conversation history to ke

Memory classes that fall into this category include:

| Memory Type | How to Migrate | Description |
|---|:---|---|
| `ConversationBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A basic memory implementation that simply stores the conversation history. |
| `ConversationStringBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A special case of `ConversationBufferMemory` designed for LLMs and no longer relevant. |
| `ConversationBufferWindowMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps the last `n` turns of the conversation. Drops the oldest turn when the buffer is full. |
| `ConversationTokenBufferMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
| `ConversationSummaryMemory` | [Link to Migration Guide](conversation_summary_memory) | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. |
| `ConversationSummaryBufferMemory` | [Link to Migration Guide](conversation_summary_memory) | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
| `VectorStoreRetrieverMemory` | See related [long-term memory agent tutorial](https://langchain-ai.github.io/langgraph/tutorials/memory/long_term_memory_agent/) | Stores the conversation history in a vector store and retrieves the most relevant parts of past conversation based on the input. |

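As a rough illustration of the direction these guides take, a `ConversationBufferWindowMemory`-style "keep the last `n` messages" policy can be approximated with `trim_messages` (a sketch only; the individual guides show complete, checkpointer-backed versions):

```python
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)

messages = [
    SystemMessage("You are a helpful assistant."),
    HumanMessage("first question"),
    AIMessage("first answer"),
    HumanMessage("second question"),
    AIMessage("second answer"),
]

# With token_counter=len, max_tokens counts messages, so this keeps the
# last two messages (one turn) plus the SystemMessage.
trimmed = trim_messages(
    messages,
    token_counter=len,
    max_tokens=2,
    strategy="last",
    start_on="human",
    include_system=True,
)
# -> [SystemMessage(...), HumanMessage("second question"), AIMessage("second answer")]
```
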
### 2. Extraction of structured information from the conversation history

Please see the [long-term memory agent tutorial](https://langchain-ai.github.io/langgraph/tutorials/memory/long_term_memory_agent/), which implements an agent that can extract structured information from the conversation history.
Memory classes that fall into this category include:

| Memory Type | Description |

@@ -78,9 +112,10 @@ These abstractions have not received much development since their initial releas

is that for these abstractions to be useful they typically require a lot of specialization for a particular application, so these
abstractions are not as widely used as the conversation history management abstractions.

For this reason, there are no migration guides for these abstractions. If you're struggling to migrate an application
that relies on these abstractions, please:

1) Review this [long-term memory agent tutorial](https://langchain-ai.github.io/langgraph/tutorials/memory/long_term_memory_agent/), which should provide a good starting point for how to extract structured information from the conversation history.
2) If you're still struggling, please open an issue on the LangChain GitHub repository, explain your use case, and we'll try to provide more guidance on how to migrate these abstractions.

The general strategy for extracting structured information from the conversation history is to use a chat model with tool-calling capabilities. The extracted information can then be saved into an appropriate data structure (e.g., a dictionary), and relevant pieces can be retrieved and added into the prompt as needed.
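
As a sketch of this strategy: one way to implement the extraction step is a chat model's `with_structured_output` (which uses tool calling under the hood). The `UserFacts` schema and the model name below are hypothetical placeholders:

```python
from typing import List, Optional

from langchain_openai import ChatOpenAI  # stand-in; any tool-calling chat model
from pydantic import BaseModel, Field


# Hypothetical schema for facts worth remembering about the user.
class UserFacts(BaseModel):
    name: Optional[str] = Field(default=None, description="The user's name")
    interests: List[str] = Field(default_factory=list)


model = ChatOpenAI(model="gpt-4o-mini")
extractor = model.with_structured_output(UserFacts)

facts = extractor.invoke(
    "Extract facts about the user from this conversation:\n"
    "Human: hi, I'm Alice and I love rock climbing.\n"
    "AI: Nice to meet you, Alice!"
)
# `facts` is a UserFacts instance; store it (e.g., in a dict keyed by user
# id) and add the relevant fields back into future prompts as needed.
```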
@@ -104,7 +104,7 @@ module.exports = {
  },
  {
    type: "category",
    label: "Upgrading to LangGraph memory",
    link: {type: 'doc', id: 'versions/migrating_memory/index'},
    collapsible: false,
    collapsed: false,
BIN docs/static/img/message_history.png (vendored): binary file not shown. Before: 39 KiB
@@ -581,12 +581,38 @@ def trim_messages(
) -> list[BaseMessage]:
    """Trim messages to be below a token count.

    trim_messages can be used to reduce the size of a chat history to a specified token
    count or specified message count.

    In either case, if passing the trimmed chat history back into a chat model
    directly, the resulting chat history should usually satisfy the following
    properties:

    1. The resulting chat history should be valid. Most chat models expect that chat
       history starts with either (1) a `HumanMessage` or (2) a `SystemMessage` followed
       by a `HumanMessage`. To achieve this, set `start_on="human"`.
       In addition, generally a `ToolMessage` can only appear after an `AIMessage`
       that involved a tool call.
       Please see the following link for more information about messages:
       https://python.langchain.com/docs/concepts/#messages
    2. It includes recent messages and drops old messages in the chat history.
       To achieve this set the `strategy="last"`.
    3. Usually, the new chat history should include the `SystemMessage` if it
       was present in the original chat history since the `SystemMessage` includes
       special instructions to the chat model. The `SystemMessage` is almost always
       the first message in the history if present. To achieve this set the
       `include_system=True`.

    **Note** The examples below show how to configure `trim_messages` to achieve
    a behavior consistent with the above properties.

    Args:
        messages: Sequence of Message-like objects to trim.
        max_tokens: Max token count of trimmed messages.
        token_counter: Function or llm for counting tokens in a BaseMessage or a list of
            BaseMessage. If a BaseLanguageModel is passed in then
            BaseLanguageModel.get_num_tokens_from_messages() will be used.
            Set to `len` to count the number of **messages** in the chat history.
        strategy: Strategy for trimming.
            - "first": Keep the first <= n_count tokens of the messages.
            - "last": Keep the last <= n_count tokens of the messages.
@@ -633,11 +659,97 @@ def trim_messages(
        ``strategy`` is specified.

    Example:
        Trim chat history based on token count, keeping the SystemMessage if
        present, and ensuring that the chat history starts with a HumanMessage (
        or a SystemMessage followed by a HumanMessage).

        .. code-block:: python

            from langchain_core.messages import (
                AIMessage,
                HumanMessage,
                BaseMessage,
                SystemMessage,
                trim_messages,
            )
            from langchain_openai import ChatOpenAI  # used as the token counter below

            messages = [
                SystemMessage("you're a good assistant, you always respond with a joke."),
                HumanMessage("i wonder why it's called langchain"),
                AIMessage(
                    'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!'
                ),
                HumanMessage("and who is harrison chasing anyways"),
                AIMessage(
                    "Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"
                ),
                HumanMessage("what do you call a speechless parrot"),
            ]

            trim_messages(
                messages,
                max_tokens=45,
                strategy="last",
                token_counter=ChatOpenAI(model="gpt-4o"),
                # Most chat models expect that chat history starts with either:
                # (1) a HumanMessage or
                # (2) a SystemMessage followed by a HumanMessage
                start_on="human",
                # Usually, we want to keep the SystemMessage
                # if it's present in the original history.
                # The SystemMessage has special instructions for the model.
                include_system=True,
                allow_partial=False,
            )

        .. code-block:: python

            [
                SystemMessage(content="you're a good assistant, you always respond with a joke."),
                HumanMessage(content='what do you call a speechless parrot'),
            ]
        Trim chat history based on the message count, keeping the SystemMessage if
        present, and ensuring that the chat history starts with a HumanMessage (
        or a SystemMessage followed by a HumanMessage).

        .. code-block:: python

            trim_messages(
                messages,
                # When `len` is passed in as the token counter function,
                # max_tokens will count the number of messages in the chat history.
                max_tokens=4,
                strategy="last",
                token_counter=len,
                # Most chat models expect that chat history starts with either:
                # (1) a HumanMessage or
                # (2) a SystemMessage followed by a HumanMessage
                start_on="human",
                # Usually, we want to keep the SystemMessage
                # if it's present in the original history.
                # The SystemMessage has special instructions for the model.
                include_system=True,
                allow_partial=False,
            )

        .. code-block:: python

            [
                SystemMessage(content="you're a good assistant, you always respond with a joke."),
                HumanMessage(content='and who is harrison chasing anyways'),
                AIMessage(content="Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"),
                HumanMessage(content='what do you call a speechless parrot'),
            ]

        Trim chat history using a custom token counter function that counts the
        number of tokens in each message.

        .. code-block:: python

            messages = [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
@@ -670,18 +782,6 @@ def trim_messages(
                    count += default_msg_prefix_len + len(msg.content) * default_content_len + default_msg_suffix_len
                return count

        First 30 tokens, not allowing partial messages:

        .. code-block:: python

            trim_messages(messages, max_tokens=30, token_counter=dummy_token_counter, strategy="first")

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
            ]

        First 30 tokens, allowing partial messages:

        .. code-block:: python
@@ -700,108 +800,6 @@ def trim_messages(
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
                AIMessage([{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"),
            ]

        First 30 tokens, allowing partial messages, have to end on HumanMessage:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=30,
                token_counter=dummy_token_counter,
                strategy="first",
                allow_partial=True,
                end_on="human",
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
            ]

        Last 30 tokens, including system message, not allowing partial messages:

        .. code-block:: python

            trim_messages(messages, max_tokens=30, include_system=True, token_counter=dummy_token_counter, strategy="last")

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
            ]

        Last 40 tokens, including system message, allowing partial messages:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=40,
                token_counter=dummy_token_counter,
                strategy="last",
                allow_partial=True,
                include_system=True,
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                AIMessage(
                    [{"type": "text", "text": "This is the FIRST 4 token block."}],
                    id="second",
                ),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
            ]

        Last 30 tokens, including system message, allowing partial messages, end on HumanMessage:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=30,
                token_counter=dummy_token_counter,
                strategy="last",
                end_on="human",
                include_system=True,
                allow_partial=True,
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                AIMessage(
                    [{"type": "text", "text": "This is the FIRST 4 token block."}],
                    id="second",
                ),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
            ]

        Last 40 tokens, including system message, allowing partial messages, start on HumanMessage:

        .. code-block:: python

            trim_messages(
                messages,
                max_tokens=40,
                token_counter=dummy_token_counter,
                strategy="last",
                include_system=True,
                allow_partial=True,
                start_on="human",
            )

        .. code-block:: python

            [
                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
            ]
    """  # noqa: E501

    if start_on and strategy == "first":