Mirror of https://github.com/hwchase17/langchain.git, synced 2026-02-14 17:23:13 +00:00.

Compare commits: 109 commits on `cc/multi_m` ... `langchain-` (branch names truncated in source). Author, date, and message metadata were not captured; abbreviated SHAs only:

8574442c57, 920d504e47, 1f3054502e, 589bc19890, 27296bdb0c, 0e9d0dbc10,
de56c31672, 335f089d6a, 9418c0d8a5, 23f701b08e, b344f34635, 017c8079e1,
d0cd115356, 34ddfba76b, 5ffcd01c41, 096f0e5966, 46de0866db, d624a475e4,
dbf9986d44, 0c723af4b0, f14bcee525, 98c357b3d7, d2cbfa379f, 75e50a3efd,
61d2dc011e, f0f90c4d88, f01b89df56, add6a78f98, 2c2db1ab69, 86d51f6be6,
83b66cb916, ff2930c119, b36c2bf833, 9e82f1df4e, fa362189a1, 88fce67724,
60d8ade078, ca39680d2a, 4af3f89a3a, 4ff576e37d, 085baef926, 47ded80b64,
cf2697ec53, 8e9569cbc8, dd5f5902e3, 3382ee8f57, ef5aff3b6c, a4ca1fe0ed,
6baf5c05a6, c6a8663afb, 1f5e207379, 7240458619, 6aa5494a75, 7262de4217,
9cfe6bcacd, 09438857e8, e3b6cddd5e, 59f2c9e737, ed5c4805f6, 2282762528,
f7c4965fb6, edb6a23aea, 3a64c7195f, 4f69094b51, ada740b5b9, f005988e31,
446361a0d3, afd457d8e1, 42944f3499, bb2c2fd885, 913c896598, 2803a48661,
fdc2b4bcac, 48affc498b, d9b628e764, 9cfb95e621, 89f28a24d3, 8c6734325b,
e72f3c26a0, f3c3ec9aec, dc19d42d37, 68d16d8a07, 5103594a2c, e42b3d285a,
48cf7c838d, b6fe7e8c10, 7a4ae6fbff, 8e053ac9d2, e981a9810d, 70532a65f8,
c6172d167a, f70df01e01, 8f8fea2d7e, cd6a83117c, 6c45c9efc3, 44b83460b2,
c87a270e5f, 63c16f5ca8, 4cc7bc6c93, 68361f9c2d, 98f0016fc2, 66758599a9,
d47d6ecbc3, 78ec7d886d, 5fb261ce27, 636d831d27, deec538335, 164e606cae,
5686fed40b
.github/workflows/_release.yml

@@ -395,8 +395,11 @@ jobs:
           # Checkout the latest package files
-          rm -rf $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}/*
-          cd $GITHUB_WORKSPACE/libs/partners/${{ matrix.partner }}
-          git checkout "$LATEST_PACKAGE_TAG" -- .
+          rm -rf $GITHUB_WORKSPACE/libs/standard-tests/*
+          cd $GITHUB_WORKSPACE/libs/
+          git checkout "$LATEST_PACKAGE_TAG" -- standard-tests/
+          git checkout "$LATEST_PACKAGE_TAG" -- partners/${{ matrix.partner }}/
+          cd partners/${{ matrix.partner }}

           # Print as a sanity check
           echo "Version number from pyproject.toml: "
.github/workflows/check_core_versions.yml (new file)

@@ -0,0 +1,29 @@
name: Check `langchain-core` version equality

on:
  pull_request:
    paths:
      - 'libs/core/pyproject.toml'
      - 'libs/core/langchain_core/version.py'

jobs:
  check_version_equality:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Check version equality
        run: |
          PYPROJECT_VERSION=$(grep -Po '(?<=^version = ")[^"]*' libs/core/pyproject.toml)
          VERSION_PY_VERSION=$(grep -Po '(?<=^VERSION = ")[^"]*' libs/core/langchain_core/version.py)

          # Compare the two versions
          if [ "$PYPROJECT_VERSION" != "$VERSION_PY_VERSION" ]; then
            echo "langchain-core versions in pyproject.toml and version.py do not match!"
            echo "pyproject.toml version: $PYPROJECT_VERSION"
            echo "version.py version: $VERSION_PY_VERSION"
            exit 1
          else
            echo "Versions match: $PYPROJECT_VERSION"
          fi
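Not part of the diff: a rough local equivalent of this check in Python, assuming the same file layout the workflow uses.

```python
# Hypothetical local equivalent of the CI version-equality check (not from the diff).
import re
from pathlib import Path

pyproject = Path("libs/core/pyproject.toml").read_text()
version_py = Path("libs/core/langchain_core/version.py").read_text()

pyproject_version = re.search(r'^version = "([^"]*)"', pyproject, re.M).group(1)
version_py_version = re.search(r'^VERSION = "([^"]*)"', version_py, re.M).group(1)

assert pyproject_version == version_py_version, (
    f"Mismatch: {pyproject_version} != {version_py_version}"
)
print(f"Versions match: {pyproject_version}")
```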
.github/workflows/codspeed.yml (new file)

@@ -0,0 +1,44 @@
name: CodSpeed

on:
  push:
    branches:
      - master
  pull_request:
    paths:
      - 'libs/core/**'
  # `workflow_dispatch` allows CodSpeed to trigger backtest
  # performance analysis in order to generate initial data.
  workflow_dispatch:

jobs:
  codspeed:
    name: Run benchmarks
    if: (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'run-codspeed-benchmarks')) || github.event_name == 'workflow_dispatch' || github.event_name == 'push'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      # We have to use 3.12, 3.13 is not yet supported
      - name: Install uv
        uses: astral-sh/setup-uv@v5
        with:
          python-version: "3.12"

      # Using this action is still necessary for CodSpeed to work
      - uses: actions/setup-python@v3
        with:
          python-version: "3.12"

      - name: install deps
        run: uv sync --group test
        working-directory: ./libs/core

      - name: Run benchmarks
        uses: CodSpeedHQ/action@v3
        with:
          token: ${{ secrets.CODSPEED_TOKEN }}
          run: |
            cd libs/core
            uv run --no-sync pytest ./tests/benchmarks --codspeed
          mode: walltime
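Not part of the diff: a minimal sketch of the kind of benchmark this workflow would collect, assuming pytest-codspeed's `@pytest.mark.benchmark` marker and a hypothetical file under `libs/core/tests/benchmarks/`.

```python
# Hypothetical benchmark file, e.g. libs/core/tests/benchmarks/test_example.py
# (not from the diff). With `pytest --codspeed`, marked tests are measured.
import pytest


@pytest.mark.benchmark
def test_sum_hot_path() -> None:
    # A trivial stand-in for the real hot path being benchmarked.
    assert sum(range(10_000)) == 49_995_000
```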
.github/workflows/run_notebooks.yml

@@ -61,6 +61,7 @@ jobs:
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
.gitignore

@@ -59,6 +59,7 @@ coverage.xml
 *.py,cover
 .hypothesis/
 .pytest_cache/
+.codspeed/

 # Translations
 *.mo
@@ -17,6 +17,7 @@
 [](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
 [<img src="https://github.com/codespaces/badge.svg" title="Open in Github Codespace" width="150" height="20">](https://codespaces.new/langchain-ai/langchain)
 [](https://twitter.com/langchainai)
+[](https://codspeed.io/langchain-ai/langchain)

 > [!NOTE]
 > Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
@@ -30,7 +30,7 @@
 "outputs": [],
 "source": [
  "# lock to 0.10.19 due to a persistent bug in more recent versions\n",
- "! pip install \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch"
+ "! pip install \"unstructured[all-docs]==0.10.19\" pillow pydantic lxml matplotlib tiktoken open_clip_torch torch"
 ]
 },
 {

@@ -409,7 +409,7 @@
 "    table_summaries,\n",
 "    tables,\n",
 "    image_summaries,\n",
-"    image_summaries,\n",
+"    img_base64_list,\n",
 ")"
 ]
 },
@@ -11,6 +11,7 @@
 import json
 import os
 import sys
+from datetime import datetime
 from pathlib import Path

 import toml

@@ -104,7 +105,7 @@ def skip_private_members(app, what, name, obj, skip, options):
 # -- Project information -----------------------------------------------------

 project = "🦜🔗 LangChain"
-copyright = "2023, LangChain Inc"
+copyright = f"{datetime.now().year}, LangChain Inc"
 author = "LangChain, Inc"

 html_favicon = "_static/img/brand/favicon.png"

@@ -275,3 +276,7 @@ if os.environ.get("READTHEDOCS", "") == "True":
     html_context["READTHEDOCS"] = True

 master_doc = "index"
+
+# If a signature’s length in characters exceeds 60,
+# each parameter within the signature will be displayed on an individual logical line
+maximum_signature_line_length = 60
(Several file diffs suppressed because one or more lines are too long.)
@@ -6,5 +6,5 @@

 - `BaseChatModel` methods `__call__`, `call_as_llm`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.invoke` instead.
 - `BaseChatModel` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.ainvoke` instead.
-- `BaseLLM` methods `__call__, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead.
+- `BaseLLM` methods `__call__`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead.
 - `BaseLLM` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseLLM.ainvoke` instead.
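Not part of the diff: a minimal migration sketch for the deprecations above; `ChatOpenAI` is used only for illustration.

```python
# Hypothetical migration sketch (not from the diff).
from langchain_openai import ChatOpenAI

model = ChatOpenAI()

# Deprecated (removed in 0.2.0):
# answer = model.predict("What is 2 + 2?")

# Preferred:
answer = model.invoke("What is 2 + 2?")
print(answer.content)
```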
@@ -15,7 +15,10 @@
 * [Messages](/docs/concepts/messages)
 :::

-Multimodal support is still relatively new and less common, model providers have not yet standardized on the "best" way to define the API. As such, LangChain's multimodal abstractions are lightweight and flexible, designed to accommodate different model providers' APIs and interaction patterns, but are **not** standardized across models.
+LangChain supports multimodal data as input to chat models:
+
+1. Following provider-specific formats
+2. Adhering to a cross-provider standard (see [how-to guides](/docs/how_to/#multimodal) for detail)

 ### How to use multimodal models
@@ -26,38 +29,85 @@ Multimodal support is still relatively new and less common, model providers have

 #### Inputs

-Some models can accept multimodal inputs, such as images, audio, video, or files. The types of multimodal inputs supported depend on the model provider. For instance, [Google's Gemini](/docs/integrations/chat/google_generative_ai/) supports documents like PDFs as inputs.
+Some models can accept multimodal inputs, such as images, audio, video, or files.
+The types of multimodal inputs supported depend on the model provider. For instance,
+[OpenAI](/docs/integrations/chat/openai/),
+[Anthropic](/docs/integrations/chat/anthropic/), and
+[Google Gemini](/docs/integrations/chat/google_generative_ai/)
+support documents like PDFs as inputs.

-Most chat models that support **multimodal inputs** also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.

-The gist of passing multimodal inputs to a chat model is to use content blocks that specify a type and corresponding data. For example, to pass an image to a chat model:
+The gist of passing multimodal inputs to a chat model is to use content blocks that
+specify a type and corresponding data. For example, to pass an image to a chat model
+as URL:

```python
from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
-        {"type": "text", "text": "describe the weather in this image"},
+        {"type": "text", "text": "Describe the weather in this image:"},
        {
            "type": "image",
            "source_type": "url",
            "url": "https://...",
        },
    ],
)
response = model.invoke([message])
```

We can also pass the image as in-line data:

```python
from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the weather in this image:"},
        {
            "type": "image",
            "source_type": "base64",
            "data": "<base64 string>",
            "mime_type": "image/jpeg",
        },
    ],
)
response = model.invoke([message])
```

To pass a PDF file as in-line data (or URL, as supported by providers such as
Anthropic), just change `"type"` to `"file"` and `"mime_type"` to `"application/pdf"`.
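By analogy with the image blocks above (and matching the file-block spec that appears later in this changeset), a PDF block looks like:

```python
{
    "type": "file",
    "source_type": "base64",
    "mime_type": "application/pdf",
    "data": "<base64 data string>",
}
```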
See the [how-to guides](/docs/how_to/#multimodal) for more detail.

Most chat models that support multimodal **image** inputs also accept those values in
OpenAI's [Chat Completions format](https://platform.openai.com/docs/guides/images?api-mode=chat):

```python
from langchain_core.messages import HumanMessage

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the weather in this image:"},
        {"type": "image_url", "image_url": {"url": image_url}},
    ],
)
response = model.invoke([message])
```

-:::caution
-The exact format of the content blocks may vary depending on the model provider. Please refer to the chat model's
-integration documentation for the correct format. Find the integration in the [chat model integration table](/docs/integrations/chat/).
-:::
+Otherwise, chat models will typically accept the native, provider-specific content
+block format. See [chat model integrations](/docs/integrations/chat/) for detail
+on specific providers.
#### Outputs

-Virtually no popular chat models support multimodal outputs at the time of writing (October 2024).
+Some chat models support multimodal outputs, such as images and audio. Multimodal
+outputs will appear as part of the [AIMessage](/docs/concepts/messages/#aimessage)
+response object. See for example:

-The only exception is OpenAI's chat model ([gpt-4o-audio-preview](/docs/integrations/chat/openai/)), which can generate audio outputs.
-
-Multimodal outputs will appear as part of the [AIMessage](/docs/concepts/messages/#aimessage) response object.
-
-Please see the [ChatOpenAI](/docs/integrations/chat/openai/) for more information on how to use multimodal outputs.
+- Generating [audio outputs](/docs/integrations/chat/openai/#audio-generation-preview) with OpenAI;
+- Generating [image outputs](/docs/integrations/chat/google_generative_ai/#image-generation) with Google Gemini.
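Not part of the diff: a sketch of consuming an audio output, assuming `ChatOpenAI` with the audio-preview model. The `additional_kwargs["audio"]` location is how langchain-openai surfaced audio at the time, but treat the details as assumptions.

```python
# Hypothetical sketch (not from the diff): audio output with ChatOpenAI.
import base64

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4o-audio-preview",
    model_kwargs={
        "modalities": ["text", "audio"],
        "audio": {"voice": "alloy", "format": "wav"},
    },
)
response = llm.invoke("Say hello!")

# The audio payload is assumed to live in additional_kwargs["audio"].
audio_bytes = base64.b64decode(response.additional_kwargs["audio"]["data"])
with open("hello.wav", "wb") as f:
    f.write(audio_bytes)
```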
#### Tools
@@ -92,7 +92,7 @@ structured_model = model.with_structured_output(Questions)

 # Define the system prompt
 system = """You are a helpful assistant that generates multiple sub-questions related to an input question. \n
-The goal is to break down the input into a set of sub-problems / sub-questions that can be answers in isolation. \n"""
+The goal is to break down the input into a set of sub-problems / sub-questions that can be answered independently. \n"""

 # Pass the question to the model
 question = """What are the main components of an LLM-powered autonomous agent system?"""
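The `Questions` schema that `with_structured_output` receives is outside this hunk; a plausible sketch (the field name and description are assumptions):

```python
# Hypothetical definition of the Questions schema (not shown in the diff).
from pydantic import BaseModel, Field


class Questions(BaseModel):
    """Sub-questions derived from the input question."""

    questions: list[str] = Field(description="A list of standalone sub-questions.")
```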
@@ -126,7 +126,7 @@ Please see the [Configurable Runnables](#configurable-runnables) section for mor
 LangChain will automatically try to infer the input and output types of a Runnable based on available information.

 Currently, this inference does not work well for more complex Runnables that are built using [LCEL](/docs/concepts/lcel) composition, and the inferred input and / or output types may be incorrect. In these cases, we recommend that users override the inferred input and output types using the `with_types` method ([API Reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_types
-).
+)).
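For illustration (not in the diff), overriding the inferred types might look like this sketch; the chain itself is an assumption:

```python
# Sketch: overriding inferred input/output types with with_types.
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: {"length": len(x)})
typed_chain = chain.with_types(input_type=str, output_type=dict)
```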
## RunnableConfig

@@ -194,7 +194,7 @@ In Python 3.11 and above, this works out of the box, and you do not need to do a
 In Python 3.9 and 3.10, if you are using **async code**, you need to manually pass the `RunnableConfig` through to the `Runnable` when invoking it.

 This is due to a limitation in [asyncio's tasks](https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task) in Python 3.9 and 3.10 which did
-not accept a `context` argument).
+not accept a `context` argument.

 Propagating the `RunnableConfig` manually is done like so:
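The example that follows that line sits outside the captured hunk; a sketch of what manual propagation looks like (the inner chain is an assumption):

```python
# Sketch of manual RunnableConfig propagation (not from the diff).
import asyncio

from langchain_core.runnables import RunnableConfig, RunnableLambda


async def call_inner(value: str, config: RunnableConfig) -> str:
    inner = RunnableLambda(lambda x: x.upper())
    # Pass the config through explicitly so callbacks/tags survive the task boundary.
    return await inner.ainvoke(value, config=config)


outer = RunnableLambda(call_inner)
print(asyncio.run(outer.ainvoke("hello", config={"tags": ["demo"]})))
```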
@@ -40,7 +40,7 @@
 "\n",
 "To view the list of separators for a given language, pass a value from this enum into\n",
 "```python\n",
-"RecursiveCharacterTextSplitter.get_separators_for_language`\n",
+"RecursiveCharacterTextSplitter.get_separators_for_language\n",
 "```\n",
 "\n",
 "To instantiate a splitter that is tailored for a specific language, pass a value from the enum into\n",
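Not part of the diff: a short sketch of both calls, assuming the current `langchain_text_splitters` import path.

```python
# Sketch: inspecting and using language-aware separators.
from langchain_text_splitters import Language, RecursiveCharacterTextSplitter

# View the separators used for a given language.
print(RecursiveCharacterTextSplitter.get_separators_for_language(Language.PYTHON))

# Instantiate a splitter tailored to that language.
splitter = RecursiveCharacterTextSplitter.from_language(
    Language.PYTHON, chunk_size=200, chunk_overlap=0
)
docs = splitter.create_documents(["def hello():\n    print('hello world')\n"])
```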
@@ -50,6 +50,7 @@ See [supported integrations](/docs/integrations/chat/) for details on getting st
 - [How to: force a specific tool call](/docs/how_to/tool_choice)
 - [How to: work with local models](/docs/how_to/local_llms)
 - [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
+- [How to: pass multimodal data directly to models](/docs/how_to/multimodal_inputs/)

 ### Messages

@@ -67,6 +68,7 @@ See [supported integrations](/docs/integrations/chat/) for details on getting st
 - [How to: use few shot examples in chat models](/docs/how_to/few_shot_examples_chat/)
 - [How to: partially format prompt templates](/docs/how_to/prompts_partial)
 - [How to: compose prompts together](/docs/how_to/prompts_composition)
+- [How to: use multimodal prompts](/docs/how_to/multimodal_prompts/)

 ### Example selectors
@@ -351,7 +353,7 @@ LangSmith allows you to closely trace, monitor and evaluate your LLM application
 It seamlessly integrates with LangChain and LangGraph, and you can use it to inspect and debug individual steps of your chains and agents as you build.

 LangSmith documentation is hosted on a separate site.
-You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/how_to_guides/), but we'll highlight a few sections that are particularly
+You can peruse [LangSmith how-to guides here](https://docs.smith.langchain.com/), but we'll highlight a few sections that are particularly
 relevant to LangChain below:

 ### Evaluation
@@ -5,120 +5,165 @@
 "id": "4facdf7f-680e-4d28-908b-2b8408e2a741",
 "metadata": {},
 "source": [
-  "# How to pass multimodal data directly to models\n",
+  "# How to pass multimodal data to models\n",
   "\n",
-  "Here we demonstrate how to pass [multimodal](/docs/concepts/multimodality/) input directly to models. \n",
-  "We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision).\n",
-  "For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n",
+  "Here we demonstrate how to pass [multimodal](/docs/concepts/multimodality/) input directly to models.\n",
   "\n",
-  "In this example we will ask a [model](/docs/concepts/chat_models/#multimodality) to describe an image."
+  "LangChain supports multimodal data as input to chat models:\n",
+  "\n",
+  "1. Following provider-specific formats\n",
+  "2. Adhering to a cross-provider standard\n",
+  "\n",
+  "Below, we demonstrate the cross-provider standard. See [chat model integrations](/docs/integrations/chat/) for detail\n",
+  "on native formats for specific providers.\n",
+  "\n",
+  ":::note\n",
+  "\n",
+  "Most chat models that support multimodal **image** inputs also accept those values in\n",
+  "OpenAI's [Chat Completions format](https://platform.openai.com/docs/guides/images?api-mode=chat):\n",
+  "\n",
+  "```python\n",
+  "{\n",
+  "    \"type\": \"image_url\",\n",
+  "    \"image_url\": {\"url\": image_url},\n",
+  "}\n",
+  "```\n",
+  ":::"
 ]
 },
 {
+ "cell_type": "markdown",
+ "id": "e30a4ff0-ab38-41a7-858c-a93f99bb2f1b",
+ "metadata": {},
+ "source": [
+  "## Images\n",
+  "\n",
+  "Many providers will accept images passed in-line as base64 data. Some will additionally accept an image from a URL directly.\n",
+  "\n",
+  "### Images from base64 data\n",
+  "\n",
+  "To pass images in-line, format them as content blocks of the following form:\n",
+  "\n",
+  "```python\n",
+  "{\n",
+  "    \"type\": \"image\",\n",
+  "    \"source_type\": \"base64\",\n",
+  "    \"mime_type\": \"image/jpeg\",  # or image/png, etc.\n",
+  "    \"data\": \"<base64 data string>\",\n",
+  "}\n",
+  "```\n",
+  "\n",
+  "Example:"
+ ]
+},
+{
 "cell_type": "code",
-"execution_count": 1,
-"id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59",
+"execution_count": 10,
+"id": "1fcf7b27-1cc3-420a-b920-0420b5892e20",
 "metadata": {},
-"outputs": [],
+"outputs": [
+ {
+  "name": "stdout",
+  "output_type": "stream",
+  "text": [
+   "The image shows a beautiful clear day with bright blue skies and wispy cirrus clouds stretching across the horizon. The clouds are thin and streaky, creating elegant patterns against the blue backdrop. The lighting suggests it's during the day, possibly late afternoon given the warm, golden quality of the light on the grass. The weather appears calm with no signs of wind (the grass looks relatively still) and no indication of rain. It's the kind of perfect, mild weather that's ideal for walking along the wooden boardwalk through the marsh grass.\n"
+  ]
+ }
+],
 "source": [
-  "image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\""
+  "import base64\n",
+  "\n",
+  "import httpx\n",
+  "from langchain.chat_models import init_chat_model\n",
+  "\n",
+  "# Fetch image data\n",
+  "image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n",
+  "image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")\n",
+  "\n",
+  "\n",
+  "# Pass to LLM\n",
+  "llm = init_chat_model(\"anthropic:claude-3-5-sonnet-latest\")\n",
+  "\n",
+  "message = {\n",
+  "    \"role\": \"user\",\n",
+  "    \"content\": [\n",
+  "        {\n",
+  "            \"type\": \"text\",\n",
+  "            \"text\": \"Describe the weather in this image:\",\n",
+  "        },\n",
+  "        # highlight-start\n",
+  "        {\n",
+  "            \"type\": \"image\",\n",
+  "            \"source_type\": \"base64\",\n",
+  "            \"data\": image_data,\n",
+  "            \"mime_type\": \"image/jpeg\",\n",
+  "        },\n",
+  "        # highlight-end\n",
+  "    ],\n",
+  "}\n",
+  "response = llm.invoke([message])\n",
+  "print(response.text())"
 ]
},
{
 "cell_type": "markdown",
 "id": "ee2b678a-01dd-40c1-81ff-ddac22be21b7",
 "metadata": {},
 "source": [
  "See [LangSmith trace](https://smith.langchain.com/public/eab05a31-54e8-4fc9-911f-56805da67bef/r) for more detail.\n",
  "\n",
  "### Images from a URL\n",
  "\n",
  "Some providers (including [OpenAI](/docs/integrations/chat/openai/),\n",
  "[Anthropic](/docs/integrations/chat/anthropic/), and\n",
  "[Google Gemini](/docs/integrations/chat/google_generative_ai/)) will also accept images from URLs directly.\n",
  "\n",
  "To pass images as URLs, format them as content blocks of the following form:\n",
  "\n",
  "```python\n",
  "{\n",
  "    \"type\": \"image\",\n",
  "    \"source_type\": \"url\",\n",
  "    \"url\": \"https://...\",\n",
  "}\n",
  "```\n",
  "\n",
  "Example:"
 ]
},
{
 "cell_type": "code",
 "execution_count": 2,
 "id": "fb896ce9",
 "metadata": {},
 "outputs": [],
 "source": [
  "from langchain_core.messages import HumanMessage\n",
  "from langchain_openai import ChatOpenAI\n",
  "\n",
  "model = ChatOpenAI(model=\"gpt-4o\")"
 ]
},
{
 "cell_type": "markdown",
 "id": "4fca4da7",
 "metadata": {},
 "source": [
  "The most commonly supported way to pass in images is to pass it in as a byte string.\n",
  "This should work for most model integrations."
 ]
},
{
 "cell_type": "code",
 "execution_count": 3,
 "id": "9ca1040c",
 "metadata": {},
 "outputs": [],
 "source": [
  "import base64\n",
  "\n",
  "import httpx\n",
  "\n",
  "image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")"
 ]
},
{
 "cell_type": "code",
 "execution_count": 4,
-"id": "ec680b6b",
+"id": "99d27f8f-ae78-48bc-9bf2-3cef35213ec7",
 "metadata": {},
 "outputs": [
  {
   "name": "stdout",
   "output_type": "stream",
   "text": [
-   "The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions.\n"
+   "The weather in this image appears to be pleasant and clear. The sky is mostly blue with a few scattered, light clouds, and there is bright sunlight illuminating the green grass and plants. There are no signs of rain or stormy conditions, suggesting it is a calm, likely warm day—typical of spring or summer.\n"
   ]
  }
 ],
 "source": [
  "message = HumanMessage(\n",
  "    content=[\n",
  "        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
  "message = {\n",
  "    \"role\": \"user\",\n",
  "    \"content\": [\n",
  "        {\n",
  "            \"type\": \"image_url\",\n",
  "            \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_data}\"},\n",
  "            \"type\": \"text\",\n",
  "            \"text\": \"Describe the weather in this image:\",\n",
  "        },\n",
  "        {\n",
  "            \"type\": \"image\",\n",
  "            # highlight-start\n",
  "            \"source_type\": \"url\",\n",
  "            \"url\": image_url,\n",
  "            # highlight-end\n",
  "        },\n",
  "    ],\n",
  ")\n",
  "response = model.invoke([message])\n",
  "print(response.content)"
 ]
},
{
 "cell_type": "markdown",
 "id": "8656018e-c56d-47d2-b2be-71e87827f90a",
 "metadata": {},
 "source": [
  "We can feed the image URL directly in a content block of type \"image_url\". Note that only some model providers support this."
 ]
},
{
 "cell_type": "code",
 "execution_count": 5,
 "id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e",
 "metadata": {},
 "outputs": [
  {
   "name": "stdout",
   "output_type": "stream",
   "text": [
    "The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered clouds, suggesting good visibility and a likely pleasant temperature. The bright sunlight is casting distinct shadows on the grass and vegetation, indicating it is likely daytime, possibly late morning or early afternoon. The overall ambiance suggests a warm and inviting day, suitable for outdoor activities.\n"
   ]
  }
 ],
 "source": [
  "message = HumanMessage(\n",
  "    content=[\n",
  "        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
  "        {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
  "    ],\n",
  ")\n",
  "response = model.invoke([message])\n",
  "print(response.content)"
  "}\n",
  "response = llm.invoke([message])\n",
  "print(response.text())"
 ]
},
{
@@ -126,12 +171,12 @@
 "id": "1c470309",
 "metadata": {},
 "source": [
- "We can also pass in multiple images."
+ "We can also pass in multiple images:"
 ]
},
{
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 4,
 "id": "325fb4ca",
 "metadata": {},
 "outputs": [
@@ -139,20 +184,460 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Yes, the two images are the same. They both depict a wooden boardwalk extending through a grassy field under a blue sky with light clouds. The scenery, lighting, and composition are identical.\n"
|
||||
"Yes, these two images are the same. They depict a wooden boardwalk going through a grassy field under a blue sky with some clouds. The colors, composition, and elements in both images are identical.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message = HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"are these two images the same?\"},\n",
|
||||
" {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
|
||||
" {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
|
||||
"message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": [\n",
|
||||
" {\"type\": \"text\", \"text\": \"Are these two images the same?\"},\n",
|
||||
" {\"type\": \"image\", \"source_type\": \"url\", \"url\": image_url},\n",
|
||||
" {\"type\": \"image\", \"source_type\": \"url\", \"url\": image_url},\n",
|
||||
" ],\n",
|
||||
")\n",
|
||||
"response = model.invoke([message])\n",
|
||||
"print(response.content)"
|
||||
"}\n",
|
||||
"response = llm.invoke([message])\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d72b83e6-8d21-448e-b5df-d5b556c3ccc8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Documents (PDF)\n",
|
||||
"\n",
|
||||
"Some providers (including [OpenAI](/docs/integrations/chat/openai/),\n",
|
||||
"[Anthropic](/docs/integrations/chat/anthropic/), and\n",
|
||||
"[Google Gemini](/docs/integrations/chat/google_generative_ai/)) will accept PDF documents.\n",
|
||||
"\n",
|
||||
"### Documents from base64 data\n",
|
||||
"\n",
|
||||
"To pass documents in-line, format them as content blocks of the following form:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"{\n",
|
||||
" \"type\": \"file\",\n",
|
||||
" \"source_type\": \"base64\",\n",
|
||||
" \"mime_type\": \"application/pdf\",\n",
|
||||
" \"data\": \"<base64 data string>\",\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6c1455a9-699a-4702-a7e0-7f6eaec76a21",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"This document appears to be a sample PDF file that contains Lorem ipsum placeholder text. It begins with a title \"Sample PDF\" followed by the subtitle \"This is a simple PDF file. Fun fun fun.\"\n",
|
||||
"\n",
|
||||
"The rest of the document consists of several paragraphs of Lorem ipsum text, which is a commonly used placeholder text in design and publishing. The text is formatted in a clean, readable layout with consistent paragraph spacing. The document appears to be a single page containing four main paragraphs of this placeholder text.\n",
|
||||
"\n",
|
||||
"The Lorem ipsum text, while appearing to be Latin, is actually scrambled Latin-like text that is used primarily to demonstrate the visual form of a document or typeface without the distraction of meaningful content. It's commonly used in publishing and graphic design when the actual content is not yet available but the layout needs to be demonstrated.\n",
|
||||
"\n",
|
||||
"The document has a professional, simple layout with generous margins and clear paragraph separation, making it an effective example of basic PDF formatting and structure.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"\n",
|
||||
"import httpx\n",
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"\n",
|
||||
"# Fetch PDF data\n",
|
||||
"pdf_url = \"https://pdfobject.com/pdf/sample.pdf\"\n",
|
||||
"pdf_data = base64.b64encode(httpx.get(pdf_url).content).decode(\"utf-8\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Pass to LLM\n",
|
||||
"llm = init_chat_model(\"anthropic:claude-3-5-sonnet-latest\")\n",
|
||||
"\n",
|
||||
"message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": \"Describe the document:\",\n",
|
||||
" },\n",
|
||||
" # highlight-start\n",
|
||||
" {\n",
|
||||
" \"type\": \"file\",\n",
|
||||
" \"source_type\": \"base64\",\n",
|
||||
" \"data\": pdf_data,\n",
|
||||
" \"mime_type\": \"application/pdf\",\n",
|
||||
" },\n",
|
||||
" # highlight-end\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"response = llm.invoke([message])\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "efb271da-8fdd-41b5-9f29-be6f8c76f49b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Documents from a URL\n",
|
||||
"\n",
|
||||
"Some providers (specifically [Anthropic](/docs/integrations/chat/anthropic/))\n",
|
||||
"will also accept documents from URLs directly.\n",
|
||||
"\n",
|
||||
"To pass documents as URLs, format them as content blocks of the following form:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"{\n",
|
||||
" \"type\": \"file\",\n",
|
||||
" \"source_type\": \"url\",\n",
|
||||
" \"url\": \"https://...\",\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "55e1d937-3b22-4deb-b9f0-9e688f0609dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"This document appears to be a sample PDF file with both text and an image. It begins with a title \"Sample PDF\" followed by the text \"This is a simple PDF file. Fun fun fun.\" The rest of the document contains Lorem ipsum placeholder text arranged in several paragraphs. The content is shown both as text and as an image of the formatted PDF, with the same content displayed in a clean, formatted layout with consistent spacing and typography. The document consists of a single page containing this sample text.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": \"Describe the document:\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"file\",\n",
|
||||
" # highlight-start\n",
|
||||
" \"source_type\": \"url\",\n",
|
||||
" \"url\": pdf_url,\n",
|
||||
" # highlight-end\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"response = llm.invoke([message])\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1e661c26-e537-4721-8268-42c0861cb1e6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Audio\n",
|
||||
"\n",
|
||||
"Some providers (including [OpenAI](/docs/integrations/chat/openai/) and\n",
|
||||
"[Google Gemini](/docs/integrations/chat/google_generative_ai/)) will accept audio inputs.\n",
|
||||
"\n",
|
||||
"### Audio from base64 data\n",
|
||||
"\n",
|
||||
"To pass audio in-line, format them as content blocks of the following form:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"{\n",
|
||||
" \"type\": \"audio\",\n",
|
||||
" \"source_type\": \"base64\",\n",
|
||||
" \"mime_type\": \"audio/wav\", # or appropriate mime-type\n",
|
||||
" \"data\": \"<base64 data string>\",\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a0b91b29-dbd6-4c94-8f24-05471adc7598",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The audio appears to consist primarily of bird sounds, specifically bird vocalizations like chirping and possibly other bird songs.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import base64\n",
|
||||
"\n",
|
||||
"import httpx\n",
|
||||
"from langchain.chat_models import init_chat_model\n",
|
||||
"\n",
|
||||
"# Fetch audio data\n",
|
||||
"audio_url = \"https://upload.wikimedia.org/wikipedia/commons/3/3d/Alcal%C3%A1_de_Henares_%28RPS_13-04-2024%29_canto_de_ruise%C3%B1or_%28Luscinia_megarhynchos%29_en_el_Soto_del_Henares.wav\"\n",
|
||||
"audio_data = base64.b64encode(httpx.get(audio_url).content).decode(\"utf-8\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Pass to LLM\n",
|
||||
"llm = init_chat_model(\"google_genai:gemini-2.0-flash-001\")\n",
|
||||
"\n",
|
||||
"message = {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": \"Describe this audio:\",\n",
|
||||
" },\n",
|
||||
" # highlight-start\n",
|
||||
" {\n",
|
||||
" \"type\": \"audio\",\n",
|
||||
" \"source_type\": \"base64\",\n",
|
||||
" \"data\": audio_data,\n",
|
||||
" \"mime_type\": \"audio/wav\",\n",
|
||||
" },\n",
|
||||
" # highlight-end\n",
|
||||
" ],\n",
|
||||
"}\n",
|
||||
"response = llm.invoke([message])\n",
|
||||
"print(response.text())"
|
||||
]
|
||||
},
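The example above uses Gemini; for OpenAI, the same standard content block should work with an audio-capable model. A minimal sketch (not part of the original notebook), assuming access to OpenAI's `gpt-4o-audio-preview` and reusing the `message` built above:

```python
# Sketch: send the same base64 audio content block to an OpenAI audio model.
# Assumes an OpenAI API key is configured; the model name is an assumption.
llm = init_chat_model("openai:gpt-4o-audio-preview")
response = llm.invoke([message])
print(response.text())
```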
{
"cell_type": "markdown",
"id": "92f55a6c-2e4a-4175-8444-8b9aacd6a13e",
"metadata": {},
"source": [
"## Provider-specific parameters\n",
"\n",
"Some providers will support or require additional fields on content blocks containing multimodal data.\n",
"For example, Anthropic lets you specify [caching](/docs/integrations/chat/anthropic/#prompt-caching) of\n",
"specific content to reduce token consumption.\n",
"\n",
"To use these fields, you can:\n",
"\n",
"1. Store them on directly on the content block; or\n",
|
||||
"2. Use the native format supported by each provider (see [chat model integrations](/docs/integrations/chat/) for detail).\n",
|
||||
"\n",
|
||||
"We show three examples below.\n",
|
||||
"\n",
|
||||
"### Example: Anthropic prompt caching"
|
||||
]
|
||||
},
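As a sketch of option 2 above (using the provider's native format), the same cached image block can be written in Anthropic's native content-block shape, which the Anthropic chat model passes through as-is. This is an illustration, not part of the original notebook; the URL is the boardwalk image used elsewhere in these docs:

```python
# Hypothetical native-format equivalent of the standard block used below.
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe the weather in this image:"},
        {
            "type": "image",
            "source": {"type": "url", "url": image_url},  # Anthropic-native source
            "cache_control": {"type": "ephemeral"},
        },
    ],
}
```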
{
"cell_type": "code",
"execution_count": 2,
"id": "83593b9d-a8d3-4c99-9dac-64e0a9d397cb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The image shows a beautiful, clear day with partly cloudy skies. The sky is a vibrant blue with wispy, white cirrus clouds stretching across it. The lighting suggests it's during daylight hours, possibly late afternoon or early evening given the warm, golden quality of the light on the grass. The weather appears calm with no signs of wind (the grass looks relatively still) and no threatening weather conditions. It's the kind of perfect weather you'd want for a walk along this wooden boardwalk through the marshland or grassland area.\n"
]
},
{
"data": {
"text/plain": [
"{'input_tokens': 1586,\n",
" 'output_tokens': 117,\n",
" 'total_tokens': 1703,\n",
" 'input_token_details': {'cache_read': 0, 'cache_creation': 1582}}"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm = init_chat_model(\"anthropic:claude-3-5-sonnet-latest\")\n",
"\n",
"message = {\n",
"    \"role\": \"user\",\n",
"    \"content\": [\n",
"        {\n",
"            \"type\": \"text\",\n",
"            \"text\": \"Describe the weather in this image:\",\n",
"        },\n",
"        {\n",
"            \"type\": \"image\",\n",
"            \"source_type\": \"url\",\n",
"            \"url\": image_url,\n",
"            # highlight-next-line\n",
"            \"cache_control\": {\"type\": \"ephemeral\"},\n",
"        },\n",
"    ],\n",
"}\n",
"response = llm.invoke([message])\n",
"print(response.text())\n",
"response.usage_metadata"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "9bbf578e-794a-4dc0-a469-78c876ccd4a3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clear blue skies, wispy clouds.\n"
]
},
{
"data": {
"text/plain": [
"{'input_tokens': 1716,\n",
" 'output_tokens': 12,\n",
" 'total_tokens': 1728,\n",
" 'input_token_details': {'cache_read': 1582, 'cache_creation': 0}}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"next_message = {\n",
"    \"role\": \"user\",\n",
"    \"content\": [\n",
"        {\n",
"            \"type\": \"text\",\n",
"            \"text\": \"Summarize that in 5 words.\",\n",
"        }\n",
"    ],\n",
"}\n",
"response = llm.invoke([message, response, next_message])\n",
"print(response.text())\n",
"response.usage_metadata"
]
},
{
"cell_type": "markdown",
"id": "915b9443-5964-43b8-bb08-691c1ba59065",
"metadata": {},
"source": [
"### Example: Anthropic citations"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ea7707a1-5660-40a1-a10f-0df48a028689",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'citations': [{'cited_text': 'Sample PDF\\r\\nThis is a simple PDF file. Fun fun fun.\\r\\n',\n",
"   'document_index': 0,\n",
"   'document_title': None,\n",
"   'end_page_number': 2,\n",
"   'start_page_number': 1,\n",
"   'type': 'page_location'}],\n",
"  'text': 'Simple PDF file: fun fun',\n",
"  'type': 'text'}]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"message = {\n",
"    \"role\": \"user\",\n",
"    \"content\": [\n",
"        {\n",
"            \"type\": \"text\",\n",
"            \"text\": \"Generate a 5 word summary of this document.\",\n",
"        },\n",
"        {\n",
"            \"type\": \"file\",\n",
"            \"source_type\": \"base64\",\n",
"            \"data\": pdf_data,\n",
"            \"mime_type\": \"application/pdf\",\n",
"            # highlight-next-line\n",
"            \"citations\": {\"enabled\": True},\n",
"        },\n",
"    ],\n",
"}\n",
"response = llm.invoke([message])\n",
"response.content"
]
},
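Since citations come back inside the structured content blocks shown above, they can be collected with ordinary dict access; a minimal sketch (not part of the original notebook) over the `response` from the previous cell:

```python
# Sketch: print each cited passage and its starting page from the content blocks.
for block in response.content:
    for citation in block.get("citations") or []:
        print(citation["cited_text"], citation.get("start_page_number"))
```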
{
"cell_type": "markdown",
"id": "e26991eb-e769-41f4-b6e0-63d81f2c7d67",
"metadata": {},
"source": [
"### Example: OpenAI file names\n",
"\n",
"OpenAI requires that PDF documents be associated with file names:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "ae076c9b-ff8f-461d-9349-250f396c9a25",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The document is a sample PDF file containing placeholder text. It consists of one page, titled \"Sample PDF\". The content is a mixture of English and the commonly used filler text \"Lorem ipsum dolor sit amet...\" and its extensions, which are often used in publishing and web design as generic text to demonstrate font, layout, and other visual elements.\n",
"\n",
"**Key points about the document:**\n",
"- Length: 1 page\n",
"- Purpose: Demonstrative/sample content\n",
"- Content: No substantive or meaningful information, just demonstration text in paragraph form\n",
"- Language: English (with the Latin-like \"Lorem Ipsum\" text used for layout purposes)\n",
"\n",
"There are no charts, tables, diagrams, or images on the page—only plain text. The document serves as an example of what a PDF file looks like rather than providing actual, useful content.\n"
]
}
],
"source": [
"llm = init_chat_model(\"openai:gpt-4.1\")\n",
"\n",
"message = {\n",
"    \"role\": \"user\",\n",
"    \"content\": [\n",
"        {\n",
"            \"type\": \"text\",\n",
"            \"text\": \"Describe the document:\",\n",
"        },\n",
"        {\n",
"            \"type\": \"file\",\n",
"            \"source_type\": \"base64\",\n",
"            \"data\": pdf_data,\n",
"            \"mime_type\": \"application/pdf\",\n",
"            # highlight-next-line\n",
"            \"filename\": \"my-file\",\n",
"        },\n",
"    ],\n",
"}\n",
"response = llm.invoke([message])\n",
"print(response.text())"
]
},
{
@@ -167,16 +652,22 @@
},
{
"cell_type": "code",
"execution_count": 8,
"id": "cd22ea82-2f93-46f9-9f7a-6aaf479fcaa9",
"execution_count": 4,
"id": "0f68cce7-350b-4cde-bc40-d3a169551fc3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_BSX4oq4SKnLlp2WlzDhToHBr'}]\n"
]
"data": {
"text/plain": [
"[{'name': 'weather_tool',\n",
"  'args': {'weather': 'sunny'},\n",
"  'id': 'toolu_01G6JgdkhwggKcQKfhXZQPjf',\n",
"  'type': 'tool_call'}]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
@@ -191,16 +682,17 @@
"    pass\n",
"\n",
"\n",
"model_with_tools = model.bind_tools([weather_tool])\n",
"llm_with_tools = llm.bind_tools([weather_tool])\n",
"\n",
"message = HumanMessage(\n",
"    content=[\n",
"        {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n",
"        {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n",
"message = {\n",
"    \"role\": \"user\",\n",
"    \"content\": [\n",
"        {\"type\": \"text\", \"text\": \"Describe the weather in this image:\"},\n",
"        {\"type\": \"image\", \"source_type\": \"url\", \"url\": image_url},\n",
"    ],\n",
")\n",
"response = model_with_tools.invoke([message])\n",
"print(response.tool_calls)"
"}\n",
"response = llm_with_tools.invoke([message])\n",
"response.tool_calls"
]
}
],
@@ -220,7 +712,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.4"
}
},
"nbformat": 4,

@@ -9,157 +9,148 @@
"\n",
"Here we demonstrate how to use prompt templates to format [multimodal](/docs/concepts/multimodality/) inputs to models. \n",
"\n",
"In this example we will ask a [model](/docs/concepts/chat_models/#multimodality) to describe an image."
"To use prompt templates in the context of multimodal data, we can templatize elements of the corresponding content block.\n",
"For example, below we define a prompt that takes a URL for an image as a parameter:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"\n",
"import httpx\n",
"\n",
"image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n",
"image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 1,
"id": "2671f995",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "4ee35e4f",
"metadata": {},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
"# Define prompt\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        (\"system\", \"Describe the image provided\"),\n",
"        (\n",
"            \"user\",\n",
"            [\n",
"                {\n",
"        {\n",
"            \"role\": \"system\",\n",
"            \"content\": \"Describe the image provided.\",\n",
"        },\n",
"        {\n",
"            \"role\": \"user\",\n",
"            \"content\": [\n",
"                {\n",
"                    \"type\": \"image_url\",\n",
"                    \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data}\"},\n",
"                }\n",
"                    \"type\": \"image\",\n",
"                    \"source_type\": \"url\",\n",
"                    # highlight-next-line\n",
"                    \"url\": \"{image_url}\",\n",
"                },\n",
"            ],\n",
"        ),\n",
"        },\n",
"    ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "089f75c2",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "02744b06",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The image depicts a sunny day with a beautiful blue sky filled with scattered white clouds. The sky has varying shades of blue, ranging from a deeper hue near the horizon to a lighter, almost pale blue higher up. The white clouds are fluffy and scattered across the expanse of the sky, creating a peaceful and serene atmosphere. The lighting and cloud patterns suggest pleasant weather conditions, likely during the daytime hours on a mild, sunny day in an outdoor natural setting.\n"
]
}
],
"source": [
"response = chain.invoke({\"image_data\": image_data})\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "e9b9ebf6",
"id": "f75d2e26-5b9a-4d5f-94a7-7f98f5666f6d",
"metadata": {},
"source": [
"We can also pass in multiple images."
"Let's use this prompt to pass an image to a [chat model](/docs/concepts/chat_models/#multimodality):"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "02190ee3",
"metadata": {},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\"system\", \"compare the two pictures provided\"),\n",
"        (\n",
"            \"user\",\n",
"            [\n",
"                {\n",
"                    \"type\": \"image_url\",\n",
"                    \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data1}\"},\n",
"                },\n",
"                {\n",
"                    \"type\": \"image_url\",\n",
"                    \"image_url\": {\"url\": \"data:image/jpeg;base64,{image_data2}\"},\n",
"                },\n",
"            ],\n",
"        ),\n",
"    ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "42af057b",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "513abe00",
"execution_count": 2,
"id": "5df2e558-321d-4cf7-994e-2815ac37e704",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The two images provided are identical. Both images feature a wooden boardwalk path extending through a lush green field under a bright blue sky with some clouds. The perspective, colors, and elements in both images are exactly the same.\n"
"This image shows a beautiful wooden boardwalk cutting through a lush green wetland or marsh area. The boardwalk extends straight ahead toward the horizon, creating a strong leading line through the composition. On either side, tall green grasses sway in what appears to be a summer or late spring setting. The sky is particularly striking, with wispy cirrus clouds streaking across a vibrant blue background. In the distance, you can see a tree line bordering the wetland area. The lighting suggests this may be during \"golden hour\" - either early morning or late afternoon - as there's a warm, gentle quality to the light that's illuminating the scene. The wooden planks of the boardwalk appear well-maintained and provide safe passage through what would otherwise be difficult terrain to traverse. It's the kind of scene you might find in a nature preserve or wildlife refuge designed to give visitors access to observe wetland ecosystems while protecting the natural environment.\n"
]
}
],
"source": [
"response = chain.invoke({\"image_data1\": image_data, \"image_data2\": image_data})\n",
"print(response.content)"
"from langchain.chat_models import init_chat_model\n",
"\n",
"llm = init_chat_model(\"anthropic:claude-3-5-sonnet-latest\")\n",
"\n",
"url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n",
"\n",
"chain = prompt | llm\n",
"response = chain.invoke({\"image_url\": url})\n",
"print(response.text())"
]
},
{
"cell_type": "markdown",
"id": "f4cfdc50-4a9f-4888-93b4-af697366b0f3",
"metadata": {},
"source": [
"Note that we can templatize arbitrary elements of the content block:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "53c88ebb-dd57-40c8-8542-b2c916706653",
"metadata": {},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        {\n",
"            \"role\": \"system\",\n",
"            \"content\": \"Describe the image provided.\",\n",
"        },\n",
"        {\n",
"            \"role\": \"user\",\n",
"            \"content\": [\n",
"                {\n",
"                    \"type\": \"image\",\n",
"                    \"source_type\": \"base64\",\n",
"                    \"mime_type\": \"{image_mime_type}\",\n",
"                    \"data\": \"{image_data}\",\n",
"                    \"cache_control\": {\"type\": \"{cache_type}\"},\n",
"                },\n",
"            ],\n",
"        },\n",
"    ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "25e4829e-0073-49a8-9669-9f43e5778383",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"This image shows a beautiful wooden boardwalk cutting through a lush green marsh or wetland area. The boardwalk extends straight ahead toward the horizon, creating a strong leading line in the composition. The surrounding vegetation consists of tall grass and reeds in vibrant green hues, with some bushes and trees visible in the background. The sky is particularly striking, featuring a bright blue color with wispy white clouds streaked across it. The lighting suggests this photo was taken during the \"golden hour\" - either early morning or late afternoon - giving the scene a warm, peaceful quality. The raised wooden path provides accessible access through what would otherwise be difficult terrain to traverse, allowing visitors to experience and appreciate this natural environment.\n"
]
}
],
"source": [
"import base64\n",
"\n",
"import httpx\n",
"\n",
"image_data = base64.b64encode(httpx.get(url).content).decode(\"utf-8\")\n",
"\n",
"chain = prompt | llm\n",
"response = chain.invoke(\n",
"    {\n",
"        \"image_data\": image_data,\n",
"        \"image_mime_type\": \"image/jpeg\",\n",
"        \"cache_type\": \"ephemeral\",\n",
"    }\n",
")\n",
"print(response.text())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ea8152c3",
"id": "424defe8-d85c-4e45-a88d-bf6f910d5ebb",
"metadata": {},
"outputs": [],
"source": []
@@ -181,7 +172,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.10.4"
}
},
"nbformat": 4,

@@ -15,7 +15,7 @@
"\n",
"To build a production application, you will need to do more work to keep track of application state appropriately.\n",
"\n",
"We recommend using `langgraph` for powering such a capability. For more details, please see this [guide](https://langchain-ai.github.io/langgraph/how-tos/human-in-the-loop/).\n",
"We recommend using `langgraph` for powering such a capability. For more details, please see this [guide](https://langchain-ai.github.io/langgraph/concepts/human_in_the_loop/).\n",
":::\n"
]
},
@@ -209,7 +209,7 @@
"metadata": {},
"outputs": [
{
"name": "stdin",
"name": "stdout",
"output_type": "stream",
"text": [
"Do you approve of the following tool invocations\n",
@@ -252,7 +252,7 @@
"metadata": {},
"outputs": [
{
"name": "stdin",
"name": "stdout",
"output_type": "stream",
"text": [
"Do you approve of the following tool invocations\n",
118
docs/docs/integrations/caches/singlestore_semantic_cache.ipynb
Normal file
@@ -0,0 +1,118 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# SingleStoreSemanticCache\n",
"\n",
"This example demonstrates how to get started with the SingleStore semantic cache.\n",
"\n",
"### Integration Overview\n",
"\n",
"`SingleStoreSemanticCache` leverages `SingleStoreVectorStore` to cache LLM responses directly in a SingleStore database, enabling efficient semantic retrieval and reuse of results.\n",
"\n",
"### Integration details\n",
"\n",
"\n",
"\n",
"| Class | Package | JS support |\n",
"| :--- | :--- | :---: |\n",
"| SingleStoreSemanticCache | langchain_singlestore | ❌ | "
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"## Installation\n",
"\n",
"This cache lives in the `langchain-singlestore` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-singlestore"
]
},
{
"cell_type": "markdown",
"id": "5c5f2839-4020-424e-9fc9-07777eede442",
"metadata": {},
"source": [
"## Usage"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "51a60dbe-9f2e-4e04-bb62-23968f17164a",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.globals import set_llm_cache\n",
"from langchain_singlestore import SingleStoreSemanticCache\n",
"\n",
"set_llm_cache(\n",
"    SingleStoreSemanticCache(\n",
"        embedding=YourEmbeddings(),\n",
"        host=\"root:pass@localhost:3306/db\",\n",
"    )\n",
")"
]
},
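The timing cells below assume a chat model bound to the name `llm`, which the notebook never defines (just as `YourEmbeddings` stands in for a concrete embeddings class). A minimal sketch of the missing setup, assuming an OpenAI key is configured; the model choice is arbitrary:

```python
# Hypothetical model setup for the cells below; any chat model works once
# the global cache has been set.
from langchain.chat_models import init_chat_model

llm = init_chat_model("openai:gpt-4o-mini")
```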
{
"cell_type": "code",
"execution_count": null,
"id": "cddda8ef",
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"# The first time, it is not yet in cache, so it should take longer\n",
"llm.invoke(\"Tell me a joke\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c474168f",
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"# The second time, while not a direct hit, the question is semantically similar to the original question,\n",
"# so it uses the cached result!\n",
"llm.invoke(\"Tell me one joke\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "langchain-singlestore-BD1RbQ07-py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -1,378 +1,401 @@
{
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: AWS Bedrock\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatBedrock\n",
"\n",
"This doc will help you get started with AWS Bedrock [chat models](/docs/concepts/chat_models). Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with.\n",
"\n",
"For more information on which models are accessible via Bedrock, head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/models-features.html).\n",
"\n",
"For detailed documentation of all ChatBedrock features and configurations head to the [API reference](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatBedrock](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `langchain-aws` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) to sign up to AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by following [these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)."
|
||||
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Bedrock integration lives in the `langchain-aws` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-aws"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_aws import ChatBedrock\n",
"\n",
"llm = ChatBedrock(\n",
"    model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
"    model_kwargs=dict(temperature=0),\n",
"    # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-fdb07dc3-ff72-430d-b22b-e7824b15c766-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
"    (\n",
"        \"system\",\n",
"        \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
"    ),\n",
"    (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Voici la traduction en français :\n",
"\n",
"J'aime la programmation.\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmieren.', additional_kwargs={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 23, 'completion_tokens': 11, 'total_tokens': 34}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-5ad005ce-9f31-4670-baa0-9373d418698a-0', usage_metadata={'input_tokens': 23, 'output_tokens': 11, 'total_tokens': 34})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\n",
"            \"system\",\n",
"            \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
"        ),\n",
"        (\"human\", \"{input}\"),\n",
"    ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
"    {\n",
"        \"input_language\": \"English\",\n",
"        \"output_language\": \"German\",\n",
"        \"input\": \"I love programming.\",\n",
"    }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## Bedrock Converse API\n",
"\n",
"AWS has recently released the Bedrock Converse API which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then a separate [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) integration has been released.\n",
"\n",
"We recommend using `ChatBedrockConverse` for users who do not need to use custom models.\n",
"\n",
"You can use it like so:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "ae728e59-94d4-40cf-9d24-25ad8723fc59",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'ResponseMetadata': {'RequestId': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 21 Aug 2024 17:23:49 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 672}}, id='run-77ee9810-e32b-45dc-9ccb-6692253b1f45-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_aws import ChatBedrockConverse\n",
"\n",
"llm = ChatBedrockConverse(\n",
"    model=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
"    temperature=0,\n",
"    max_tokens=None,\n",
"    # other params...\n",
")\n",
"\n",
"llm.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "4da16f3e-e80b-48c0-8036-c1cc5f7c8c05",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"Note that `ChatBedrockConverse` emits content blocks while streaming:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7794b32e-d8de-4973-bf0f-39807dc745f0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content=[] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'Vo', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'ici', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' tra', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'duction', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' en', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' français', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' :', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': '\\n\\nJ', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': \"'\", 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'a', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'ime', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' programm', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'ation', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': '.', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[] response_metadata={'stopReason': 'end_turn'} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[] response_metadata={'metrics': {'latencyMs': 713}} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8' usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50}\n"
]
}
],
"source": [
"for chunk in llm.stream(messages):\n",
"    print(chunk)"
]
},
{
"cell_type": "markdown",
"id": "0ef05abb-9c04-4dc3-995e-f857779644d5",
"metadata": {},
"source": [
"An output parser can be used to filter to text, if desired:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "2a4e743f-ea7d-4e5a-9b12-f9992362de8b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"|Vo|ici| la| tra|duction| en| français| :|\n",
"\n",
"J|'|a|ime| la| programm|ation|.||||"
]
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"chain = llm | StrOutputParser()\n",
"\n",
"for chunk in chain.stream(messages):\n",
"    print(chunk, end=\"|\")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all ChatBedrock features and configurations head to the API reference: https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html\n",
"\n",
"For detailed documentation of all ChatBedrockConverse features and configurations head to the API reference: https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
"sidebar_label: AWS Bedrock\n",
"---"
]
},
"nbformat": 4,
"nbformat_minor": 5
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatBedrock\n",
"\n",
"This doc will help you get started with AWS Bedrock [chat models](/docs/concepts/chat_models). Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with.\n",
"\n",
"AWS Bedrock maintains a [Converse API](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html) which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html).\n",
"\n",
":::info\n",
"\n",
"We recommend the Converse API for users who do not need to use custom models. It can be accessed using [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html).\n",
"\n",
":::\n",
"\n",
"For detailed documentation of all Bedrock features and configurations head to the [API reference](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatBedrock](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ |  |  |\n",
"| [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ |  |  |\n",
"\n",
"### Model features\n",
"\n",
"The below apply to both `ChatBedrock` and `ChatBedrockConverse`.\n",
"\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ |\n",
"\n",
"## Setup\n",
"\n",
"To access Bedrock models you'll need to create an AWS account, set up the Bedrock API service, get an access key ID and secret key, and install the `langchain-aws` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"Head to the [AWS docs](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) to sign up to AWS and setup your credentials. You'll also need to turn on model access for your account, which you can do by following [these instructions](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain Bedrock integration lives in the `langchain-aws` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-aws"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_aws import ChatBedrockConverse\n",
|
||||
"\n",
|
||||
"llm = ChatBedrockConverse(\n",
|
||||
" model_id=\"anthropic.claude-3-5-sonnet-20240620-v1:0\",\n",
|
||||
" # temperature=...,\n",
|
||||
" # max_tokens=...,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "fcd8de52-4a1b-4875-b463-d41b031e06a1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, response_metadata={'ResponseMetadata': {'RequestId': 'b07d1630-06f2-44b1-82bf-e82538dd2215', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 16 Apr 2025 19:35:34 GMT', 'content-type': 'application/json', 'content-length': '206', 'connection': 'keep-alive', 'x-amzn-requestid': 'b07d1630-06f2-44b1-82bf-e82538dd2215'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [488]}, 'model_name': 'anthropic.claude-3-5-sonnet-20240620-v1:0'}, id='run-d09ed928-146a-4336-b1fd-b63c9e623494-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4da16f3e-e80b-48c0-8036-c1cc5f7c8c05",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"Note that `ChatBedrockConverse` emits content blocks while streaming:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "605e04fa-1a76-47ac-8c92-fe128659663e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"content=[] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
|
||||
"content=[{'type': 'text', 'text': 'J', 'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
|
||||
"content=[{'type': 'text', 'text': \"'adore la\", 'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
|
||||
"content=[{'type': 'text', 'text': ' programmation.', 'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
|
||||
"content=[{'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
|
||||
"content=[] additional_kwargs={} response_metadata={'stopReason': 'end_turn'} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
|
||||
"content=[] additional_kwargs={} response_metadata={'metrics': {'latencyMs': 600}, 'model_name': 'anthropic.claude-3-5-sonnet-20240620-v1:0'} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd' usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in llm.stream(messages):\n",
|
||||
" print(chunk)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0ef05abb-9c04-4dc3-995e-f857779644d5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can filter to text using the [.text()](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.text) method on the output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "2a4e743f-ea7d-4e5a-9b12-f9992362de8b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"|J|'adore la| programmation.||||"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in llm.stream(messages):\n",
|
||||
" print(chunk.text(), end=\"|\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a77519e5-897d-41a0-a9bb-55300fa79efc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Prompt caching\n",
|
||||
"\n",
|
||||
"Bedrock supports [caching](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-caching.html) of elements of your prompts, including messages and tools. This allows you to re-use large documents, instructions, [few-shot documents](/docs/concepts/few_shot_prompting/), and other data to reduce latency and costs.\n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"Not all models support prompt caching. See supported models [here](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-caching.html#prompt-caching-models).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"To enable caching on an element of a prompt, mark its associated content block using the `cachePoint` key. See example below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "d5f63d01-85e8-4797-a2be-0fea747a6049",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"First invocation:\n",
|
||||
"{'cache_creation': 1528, 'cache_read': 0}\n",
|
||||
"\n",
|
||||
"Second:\n",
|
||||
"{'cache_creation': 0, 'cache_read': 1528}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from langchain_aws import ChatBedrockConverse\n",
|
||||
"\n",
|
||||
"llm = ChatBedrockConverse(model=\"us.anthropic.claude-3-7-sonnet-20250219-v1:0\")\n",
|
||||
"\n",
|
||||
"# Pull LangChain readme\n",
|
||||
"get_response = requests.get(\n",
|
||||
" \"https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md\"\n",
|
||||
")\n",
|
||||
"readme = get_response.text\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" {\n",
|
||||
" \"role\": \"user\",\n",
|
||||
" \"content\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": \"What's LangChain, according to its README?\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"text\",\n",
|
||||
" \"text\": f\"{readme}\",\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"cachePoint\": {\"type\": \"default\"},\n",
|
||||
" },\n",
|
||||
" ],\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"response_1 = llm.invoke(messages)\n",
|
||||
"response_2 = llm.invoke(messages)\n",
|
||||
"\n",
|
||||
"usage_1 = response_1.usage_metadata[\"input_token_details\"]\n",
|
||||
"usage_2 = response_2.usage_metadata[\"input_token_details\"]\n",
|
||||
"\n",
|
||||
"print(f\"First invocation:\\n{usage_1}\")\n",
|
||||
"print(f\"\\nSecond:\\n{usage_2}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1b550667-af5b-4557-b84f-c8f865dad6cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "6033f3fa-0e96-46e3-abb3-1530928fea88",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Here's the German translation:\\n\\nIch liebe das Programmieren.\", additional_kwargs={}, response_metadata={'ResponseMetadata': {'RequestId': '1de3d7c0-8062-4f7e-bb8a-8f725b97a8b0', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 16 Apr 2025 19:32:51 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '1de3d7c0-8062-4f7e-bb8a-8f725b97a8b0'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [719]}, 'model_name': 'anthropic.claude-3-5-sonnet-20240620-v1:0'}, id='run-7021fcd7-704e-496b-a92e-210139614402-0', usage_metadata={'input_tokens': 23, 'output_tokens': 19, 'total_tokens': 42, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}})"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatBedrock features and configurations head to the API reference: https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html\n",
|
||||
"\n",
|
||||
"For detailed documentation of all ChatBedrockConverse features and configurations head to the API reference: https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -1,262 +1,393 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "30373ae2-f326-4e96-a1f7-062f57396886",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Cloudflare Workers AI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f679592d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatCloudflareWorkersAI\n",
|
||||
"\n",
|
||||
"This will help you getting started with CloudflareWorkersAI [chat models](/docs/concepts/chat_models). For detailed documentation of all available Cloudflare WorkersAI models head to the [API reference](https://developers.cloudflare.com/workers-ai/).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/cloudflare_workersai) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ChatCloudflareWorkersAI | langchain-community| ❌ | ❌ | ✅ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
|
||||
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"- To access Cloudflare Workers AI models you'll need to create a Cloudflare account, get an account number and API key, and install the `langchain-community` package.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Head to [this document](https://developers.cloudflare.com/workers-ai/get-started/rest-api/) to sign up to Cloudflare Workers AI and generate an API key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4a524cff",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "71b53c25",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "777a8526",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain ChatCloudflareWorkersAI integration lives in the `langchain-community` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "54990998",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-community"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "629ba46f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ec13c2d9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.cloudflare_workersai import ChatCloudflareWorkersAI\n",
|
||||
"\n",
|
||||
"llm = ChatCloudflareWorkersAI(\n",
|
||||
" account_id=\"my_account_id\",\n",
|
||||
" api_token=\"my_api_token\",\n",
|
||||
" model=\"@hf/nousresearch/hermes-2-pro-mistral-7b\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "119b6732",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2438a906",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-11-07 15:55:14 - INFO - Sending prompt to Cloudflare Workers AI: {'prompt': 'role: system, content: You are a helpful assistant that translates English to French. Translate the user sentence.\\nrole: user, content: I love programming.', 'tools': None}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='{\\'result\\': {\\'response\\': \\'Je suis un assistant virtuel qui peut traduire l\\\\\\'anglais vers le français. La phrase que vous avez dite est : \"J\\\\\\'aime programmer.\" En français, cela se traduit par : \"J\\\\\\'adore programmer.\"\\'}, \\'success\\': True, \\'errors\\': [], \\'messages\\': []}', additional_kwargs={}, response_metadata={}, id='run-838fd398-8594-4ca5-9055-03c72993caf6-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "1b4911bd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'result': {'response': 'Je suis un assistant virtuel qui peut traduire l\\'anglais vers le français. La phrase que vous avez dite est : \"J\\'aime programmer.\" En français, cela se traduit par : \"J\\'adore programmer.\"'}, 'success': True, 'errors': [], 'messages': []}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "111aa5d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "b2a14282",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2024-11-07 15:55:24 - INFO - Sending prompt to Cloudflare Workers AI: {'prompt': 'role: system, content: You are a helpful assistant that translates English to German.\\nrole: user, content: I love programming.', 'tools': None}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"{'result': {'response': 'role: system, content: Das ist sehr nett zu hören! Programmieren lieben, ist eine interessante und anspruchsvolle Hobby- oder Berufsausrichtung. Wenn Sie englische Texte ins Deutsche übersetzen möchten, kann ich Ihnen helfen. Geben Sie bitte den englischen Satz oder die Übersetzung an, die Sie benötigen.'}, 'success': True, 'errors': [], 'messages': []}\", additional_kwargs={}, response_metadata={}, id='run-0d3be9a6-3d74-4dde-b49a-4479d6af00ef-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e1f311bd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation on `ChatCloudflareWorkersAI` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/community/chat_models/langchain_community.chat_models.cloudflare_workersai.html)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "afaf8039",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: CloudflareWorkersAI\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e49f1e0d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ChatCloudflareWorkersAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This will help you getting started with CloudflareWorkersAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatCloudflareWorkersAI features and configurations head to the [API reference](https://python.langchain.com/docs/integrations/chat/cloudflare_workersai/).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"### Integration details\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/cloudflare) | Package downloads | Package latest |\n",
|
||||
"| :--- | :--- |:-----:|:------------:|:------------------------------------------------------------------------:| :---: | :---: |\n",
|
||||
"| [ChatCloudflareWorkersAI](https://python.langchain.com/docs/integrations/chat/cloudflare_workersai/) | [langchain-cloudflare](https://pypi.org/project/langchain-cloudflare/) | ✅ | ❌ | ❌ |  |  |\n",
|
||||
"\n",
|
||||
"### Model features\n",
|
||||
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
|
||||
"|:-----------------------------------------:|:----------------------------------------------------:|:---------:|:----------------------------------------------:|:-----------:|:-----------:|:-----------------------------------------------------:|:------------:|:------------------------------------------------------:|:----------------------------------:|\n",
|
||||
"| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | \n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access CloudflareWorkersAI models you'll need to create a/an CloudflareWorkersAI account, get an API key, and install the `langchain-cloudflare` integration package.\n",
|
||||
"\n",
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Head to https://www.cloudflare.com/developer-platform/products/workers-ai/ to sign up to CloudflareWorkersAI and generate an API key. Once you've done this set the CF_API_KEY environment variable and the CF_ACCOUNT_ID environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
|
||||
"metadata": {
|
||||
"is_executing": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"CF_API_KEY\"):\n",
|
||||
" os.environ[\"CF_API_KEY\"] = getpass.getpass(\n",
|
||||
" \"Enter your CloudflareWorkersAI API key: \"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"if not os.getenv(\"CF_ACCOUNT_ID\"):\n",
|
||||
" os.environ[\"CF_ACCOUNT_ID\"] = getpass.getpass(\n",
|
||||
" \"Enter your CloudflareWorkersAI account ID: \"\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"The LangChain CloudflareWorkersAI integration lives in the `langchain-cloudflare` package:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-cloudflare"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation\n",
|
||||
"\n",
|
||||
"Now we can instantiate our model object and generate chat completions:\n",
|
||||
"\n",
|
||||
"- Update model instantiation with relevant params."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 35,
|
||||
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2025-04-07T17:48:31.193773Z",
|
||||
"start_time": "2025-04-07T17:48:31.179196Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_cloudflare.chat_models import ChatCloudflareWorkersAI\n",
|
||||
"\n",
|
||||
"llm = ChatCloudflareWorkersAI(\n",
|
||||
" model=\"@cf/meta/llama-3.3-70b-instruct-fp8-fast\",\n",
|
||||
" temperature=0,\n",
|
||||
" max_tokens=1024,\n",
|
||||
" # other params...\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b4f3e15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Invocation\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "62e0dbc3",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, response_metadata={'token_usage': {'prompt_tokens': 37, 'completion_tokens': 9, 'total_tokens': 46}, 'model_name': '@cf/meta/llama-3.3-70b-instruct-fp8-fast'}, id='run-995d1970-b6be-49f3-99ae-af4cdba02304-0', usage_metadata={'input_tokens': 37, 'output_tokens': 9, 'total_tokens': 46})"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"I love programming.\"),\n",
|
||||
"]\n",
|
||||
"ai_msg = llm.invoke(messages)\n",
|
||||
"ai_msg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"J'adore la programmation.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(ai_msg.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Chaining\n",
|
||||
"\n",
|
||||
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='Ich liebe das Programmieren.', additional_kwargs={}, response_metadata={'token_usage': {'prompt_tokens': 32, 'completion_tokens': 7, 'total_tokens': 39}, 'model_name': '@cf/meta/llama-3.3-70b-instruct-fp8-fast'}, id='run-d1b677bc-194e-4473-90f1-aa65e8e46d50-0', usage_metadata={'input_tokens': 32, 'output_tokens': 7, 'total_tokens': 39})"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate(\n",
|
||||
" [\n",
|
||||
" (\n",
|
||||
" \"system\",\n",
|
||||
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
|
||||
" ),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | llm\n",
|
||||
"chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"input_language\": \"English\",\n",
|
||||
" \"output_language\": \"German\",\n",
|
||||
" \"input\": \"I love programming.\",\n",
|
||||
" }\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Structured Outputs"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "91cae406-14d7-46c9-b942-2d1476588423",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'setup': 'Why did the cat join a band?',\n",
|
||||
" 'punchline': 'Because it wanted to be the purr-cussionist',\n",
|
||||
" 'rating': '8'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"json_schema = {\n",
|
||||
" \"title\": \"joke\",\n",
|
||||
" \"description\": \"Joke to tell user.\",\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"setup\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"The setup of the joke\",\n",
|
||||
" },\n",
|
||||
" \"punchline\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"The punchline to the joke\",\n",
|
||||
" },\n",
|
||||
" \"rating\": {\n",
|
||||
" \"type\": \"integer\",\n",
|
||||
" \"description\": \"How funny the joke is, from 1 to 10\",\n",
|
||||
" \"default\": None,\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"setup\", \"punchline\"],\n",
|
||||
"}\n",
|
||||
"structured_llm = llm.with_structured_output(json_schema)\n",
|
||||
"\n",
|
||||
"structured_llm.invoke(\"Tell me a joke about cats\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dbfc0c43-e76b-446e-bbb1-d351640bb7be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Bind tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 36,
|
||||
"id": "0765265e-4d00-4030-bf48-7e8d8c9af2ec",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'validate_user',\n",
|
||||
" 'args': {'user_id': '123',\n",
|
||||
" 'addresses': '[\"123 Fake St in Boston MA\", \"234 Pretend Boulevard in Houston TX\"]'},\n",
|
||||
" 'id': '31ec7d6a-9ce5-471b-be64-8ea0492d1387',\n",
|
||||
" 'type': 'tool_call'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 36,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def validate_user(user_id: int, addresses: List[str]) -> bool:\n",
|
||||
" \"\"\"Validate user using historical addresses.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" user_id (int): the user ID.\n",
|
||||
" addresses (List[str]): Previous addresses as a list of strings.\n",
|
||||
" \"\"\"\n",
|
||||
" return True\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools([validate_user])\n",
|
||||
"\n",
|
||||
"result = llm_with_tools.invoke(\n",
|
||||
" \"Could you validate user 123? They previously lived at \"\n",
|
||||
" \"123 Fake St in Boston MA and 234 Pretend Boulevard in \"\n",
|
||||
" \"Houston TX.\"\n",
|
||||
")\n",
|
||||
"result.tool_calls"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"https://developers.cloudflare.com/workers-ai/\n",
|
||||
"https://developers.cloudflare.com/agents/"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
||||
@@ -121,7 +121,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_litellm.chat_models import ChatLiteLLM\n",
|
||||
"from langchain_litellm import ChatLiteLLM\n",
|
||||
"\n",
|
||||
"llm = ChatLiteLLM(model=\"gpt-3.5-turbo\")"
|
||||
]
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -408,7 +408,7 @@
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"OpenAI supports a [Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions) API that is oriented toward building [agentic](/docs/concepts/agents/) applications. It includes a suite of [built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses), including web and file search. It also supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses), allowing you to continue a conversational thread without explicitly passing in previous messages.\n",
|
||||
"OpenAI supports a [Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions) API that is oriented toward building [agentic](/docs/concepts/agents/) applications. It includes a suite of [built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses), including web and file search. It also supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses), allowing you to continue a conversational thread without explicitly passing in previous messages, as well as the output from [reasoning processes](https://platform.openai.com/docs/guides/reasoning?api-mode=responses).\n",
|
||||
"\n",
|
||||
"`ChatOpenAI` will route to the Responses API if one of these features is used. You can also specify `use_responses_api=True` when instantiating `ChatOpenAI`.\n",
|
||||
"\n",
|
||||
@@ -1056,6 +1056,77 @@
|
||||
"print(second_response.text())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "67bf5bd2-0935-40a0-b1cd-c6662b681d4b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Reasoning output\n",
|
||||
"\n",
|
||||
"Some OpenAI models will generate separate text content illustrating their reasoning process. See OpenAI's [reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses) for details.\n",
|
||||
"\n",
|
||||
"OpenAI can return a summary of the model's reasoning (although it doesn't expose the raw reasoning tokens). To configure `ChatOpenAI` to return this summary, specify the `reasoning` parameter:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8d322f3a-0732-45ab-ac95-dfd4596e0d85",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'3^3 = 3 × 3 × 3 = 27.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"reasoning = {\n",
|
||||
" \"effort\": \"medium\", # 'low', 'medium', or 'high'\n",
|
||||
" \"summary\": \"auto\", # 'detailed', 'auto', or None\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(\n",
|
||||
" model=\"o4-mini\",\n",
|
||||
" use_responses_api=True,\n",
|
||||
" model_kwargs={\"reasoning\": reasoning},\n",
|
||||
")\n",
|
||||
"response = llm.invoke(\"What is 3^3?\")\n",
|
||||
"\n",
|
||||
"# Output\n",
|
||||
"response.text()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "d7dcc082-b7c8-41b7-a5e2-441b9679e41b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"**Calculating power of three**\n",
|
||||
"\n",
|
||||
"The user is asking for the result of 3 to the power of 3, which I know is 27. It's a straightforward question, so I’ll keep my answer concise: 27. I could explain that this is the same as multiplying 3 by itself twice: 3 × 3 × 3 equals 27. However, since the user likely just needs the answer, I’ll simply respond with 27.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Reasoning\n",
|
||||
"reasoning = response.additional_kwargs[\"reasoning\"]\n",
|
||||
"for block in reasoning[\"summary\"]:\n",
|
||||
" print(block[\"text\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "57e27714",
|
||||
|
||||
@@ -36,10 +36,7 @@
|
||||
"pip install oracledb"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
}
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -51,10 +48,7 @@
|
||||
"from settings import s"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
}
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -97,16 +91,14 @@
|
||||
"doc_2 = doc_loader_2.load()"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
}
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"With TLS authentication, wallet_location and wallet_password are not required."
|
||||
"With TLS authentication, wallet_location and wallet_password are not required.\n",
|
||||
"Bind variable option is provided by argument \"parameters\"."
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
@@ -117,6 +109,8 @@
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"SQL_QUERY = \"select channel_id, channel_desc from sh.channels where channel_desc = :1 fetch first 5 rows only\"\n",
|
||||
"\n",
|
||||
"doc_loader_3 = OracleAutonomousDatabaseLoader(\n",
|
||||
" query=SQL_QUERY,\n",
|
||||
" user=s.USERNAME,\n",
|
||||
@@ -124,6 +118,7 @@
|
||||
" schema=s.SCHEMA,\n",
|
||||
" config_dir=s.CONFIG_DIR,\n",
|
||||
" tns_name=s.TNS_NAME,\n",
|
||||
" parameters=[\"Direct Sales\"],\n",
|
||||
")\n",
|
||||
"doc_3 = doc_loader_3.load()\n",
|
||||
"\n",
|
||||
@@ -133,6 +128,7 @@
|
||||
" password=s.PASSWORD,\n",
|
||||
" schema=s.SCHEMA,\n",
|
||||
" connection_string=s.CONNECTION_STRING,\n",
|
||||
" parameters=[\"Direct Sales\"],\n",
|
||||
")\n",
|
||||
"doc_4 = doc_loader_4.load()"
|
||||
],
|
||||
|
||||
187
docs/docs/integrations/document_loaders/singlestore.ipynb
Normal file
187
docs/docs/integrations/document_loaders/singlestore.ipynb
Normal file
@@ -0,0 +1,187 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: SingleStore\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStoreLoader\n",
|
||||
"\n",
|
||||
"The `SingleStoreLoader` allows you to load documents directly from a SingleStore database table. It is part of the `langchain-singlestore` integration package.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"### Integration Details\n",
|
||||
"\n",
|
||||
"| Class | Package | JS Support |\n",
|
||||
"| :--- | :--- | :---: |\n",
|
||||
"| `SingleStoreLoader` | `langchain_singlestore` | ❌ |\n",
|
||||
"\n",
|
||||
"### Features\n",
|
||||
"- Load documents lazily to handle large datasets efficiently.\n",
|
||||
"- Supports native asynchronous operations.\n",
|
||||
"- Easily configurable to work with different database schemas.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To use the `SingleStoreLoader`, you need to install the `langchain-singlestore` package. Follow the installation instructions below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"\n",
|
||||
"Install **langchain_singlestore**."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain_singlestore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"To initialize `SingleStoreLoader`, you need to provide connection parameters for the SingleStore database and specify the table and fields to load documents from.\n",
|
||||
"\n",
|
||||
"### Required Parameters:\n",
|
||||
"- **host** (`str`): Hostname, IP address, or URL for the database.\n",
|
||||
"- **table_name** (`str`): Name of the table to query. Defaults to `embeddings`.\n",
|
||||
"- **content_field** (`str`): Field containing document content. Defaults to `content`.\n",
|
||||
"- **metadata_field** (`str`): Field containing document metadata. Defaults to `metadata`.\n",
|
||||
"\n",
|
||||
"### Optional Parameters:\n",
|
||||
"- **id_field** (`str`): Field containing document IDs. Defaults to `id`.\n",
|
||||
"\n",
|
||||
"### Connection Pool Parameters:\n",
|
||||
"- **pool_size** (`int`): Number of active connections in the pool. Defaults to `5`.\n",
|
||||
"- **max_overflow** (`int`): Maximum connections beyond `pool_size`. Defaults to `10`.\n",
|
||||
"- **timeout** (`float`): Connection timeout in seconds. Defaults to `30`.\n",
|
||||
"\n",
|
||||
"### Additional Options:\n",
|
||||
"- **pure_python** (`bool`): Enables pure Python mode.\n",
|
||||
"- **local_infile** (`bool`): Allows local file uploads.\n",
|
||||
"- **charset** (`str`): Character set for string values.\n",
|
||||
"- **ssl_key**, **ssl_cert**, **ssl_ca** (`str`): Paths to SSL files.\n",
|
||||
"- **ssl_disabled** (`bool`): Disables SSL.\n",
|
||||
"- **ssl_verify_cert** (`bool`): Verifies server's certificate.\n",
|
||||
"- **ssl_verify_identity** (`bool`): Verifies server's identity.\n",
|
||||
"- **autocommit** (`bool`): Enables autocommits.\n",
|
||||
"- **results_type** (`str`): Structure of query results (e.g., `tuples`, `dicts`)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_singlestore.document_loaders import SingleStoreLoader\n",
|
||||
"\n",
|
||||
"loader = SingleStoreLoader(\n",
|
||||
" host=\"127.0.0.1:3306/db\",\n",
|
||||
" table_name=\"documents\",\n",
|
||||
" content_field=\"content\",\n",
|
||||
" metadata_field=\"metadata\",\n",
|
||||
" id_field=\"id\",\n",
|
||||
")"
|
||||
]
|
||||
},
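{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hypothetical variant using the connection pool parameters documented above is sketched below; the pool sizes and timeout are illustrative assumptions, not recommended settings:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Widen the connection pool for larger loads (values are illustrative).\n",
"pooled_loader = SingleStoreLoader(\n",
"    host=\"127.0.0.1:3306/db\",\n",
"    table_name=\"documents\",\n",
"    pool_size=10,  # active connections kept in the pool\n",
"    max_overflow=20,  # extra connections allowed beyond pool_size\n",
"    timeout=60.0,  # connection timeout in seconds\n",
")"
]
},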
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"docs[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0].metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Lazy Load"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"page = []\n",
|
||||
"for doc in loader.lazy_load():\n",
|
||||
" page.append(doc)\n",
|
||||
" if len(page) >= 10:\n",
|
||||
" # do some paged operation, e.g.\n",
|
||||
" # index.upsert(page)\n",
|
||||
"\n",
|
||||
" page = []"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all SingleStore Document Loader features and configurations head to the github page: [https://github.com/singlestore-labs/langchain-singlestore/](https://github.com/singlestore-labs/langchain-singlestore/)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -2525,7 +2525,17 @@
|
||||
"source": [
|
||||
"## `SingleStoreDB` semantic cache\n",
|
||||
"\n",
|
||||
"You can use [SingleStoreDB](https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/) as a semantic cache to cache prompts and responses."
|
||||
"You can use [SingleStore](https://python.langchain.com/docs/integrations/vectorstores/singlestore/) as a semantic cache to cache prompts and responses."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "596e15e8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-singlestore"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -2535,11 +2545,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.cache import SingleStoreDBSemanticCache\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_singlestore.cache import SingleStoreSemanticCache\n",
|
||||
"\n",
|
||||
"set_llm_cache(\n",
|
||||
" SingleStoreDBSemanticCache(\n",
|
||||
" SingleStoreSemanticCache(\n",
|
||||
" embedding=OpenAIEmbeddings(),\n",
|
||||
" host=\"root:pass@localhost:3306/db\",\n",
|
||||
" )\n",
|
||||
|
||||
@@ -35,9 +35,18 @@ from langchain_aws import ChatBedrock
|
||||
```
|
||||
|
||||
### Bedrock Converse
|
||||
AWS has recently released the Bedrock Converse API which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then a separate [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) integration has been released.
|
||||
AWS Bedrock maintains a [Converse API](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html)
|
||||
that provides a unified conversational interface for Bedrock models. This API does not
|
||||
yet support custom models. You can see a list of all
|
||||
[models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html).
|
||||
|
||||
We recommend using `ChatBedrockConverse` for users who do not need to use custom models. See the [docs](/docs/integrations/chat/bedrock/#bedrock-converse-api) and [API reference](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) for more detail.
|
||||
:::info
|
||||
|
||||
We recommend the Converse API for users who do not need to use custom models. It can be accessed using [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html).
|
||||
|
||||
:::
|
||||
|
||||
See a [usage example](/docs/integrations/chat/bedrock).
|
||||
|
||||
```python
|
||||
from langchain_aws import ChatBedrockConverse
|
||||
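# A minimal usage sketch (an illustrative addition, not from the upstream docs):
# the model ID is the one instantiated in the Bedrock chat notebook, and AWS
# credentials are assumed to be configured in the environment.
llm = ChatBedrockConverse(model="us.anthropic.claude-3-7-sonnet-20250219-v1:0")
llm.invoke("Translate 'I love programming' into French.")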
|
||||
@@ -8,18 +8,34 @@
|
||||
> learning models, on the `Cloudflare` network, from your code via REST API.
|
||||
|
||||
|
||||
## ChatModels
|
||||
|
||||
See [installation instructions and usage example](/docs/integrations/chat/cloudflare_workersai).
|
||||
|
||||
```python
|
||||
from langchain_cloudflare import ChatCloudflareWorkersAI
|
||||
```
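
A minimal instantiation sketch, mirroring the chat integration notebook (the model ID is copied from that notebook; `CF_API_KEY` and `CF_ACCOUNT_ID` are assumed to be set in the environment):

```python
from langchain_cloudflare import ChatCloudflareWorkersAI

llm = ChatCloudflareWorkersAI(
    model="@cf/meta/llama-3.3-70b-instruct-fp8-fast",
    temperature=0,
    max_tokens=1024,
)
llm.invoke("I love programming.")
```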
|
||||
|
||||
## VectorStore
|
||||
|
||||
See [installation instructions and usage example](/docs/integrations/vectorstores/cloudflare_vectorize).
|
||||
|
||||
```python
|
||||
from langchain_cloudflare import CloudflareVectorize
|
||||
```
|
||||
|
||||
## Embeddings
|
||||
|
||||
See [installation instructions and usage example](/docs/integrations/text_embedding/cloudflare_workersai).
|
||||
|
||||
```python
|
||||
from langchain_cloudflare import CloudflareWorkersAIEmbeddings
|
||||
```
|
||||
|
||||
## LLMs
|
||||
|
||||
See [installation instructions and usage example](/docs/integrations/llms/cloudflare_workersai).
|
||||
|
||||
```python
|
||||
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
|
||||
```
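
A hypothetical instantiation sketch; the `account_id`/`api_token` parameter names mirror the community chat model shown elsewhere in these docs and are an assumption, not verified API documentation:

```python
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI

llm = CloudflareWorkersAI(
    account_id="my_account_id",  # placeholder
    api_token="my_api_token",  # placeholder
)
```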
|
||||
|
||||
## Embedding models
|
||||
|
||||
See [installation instructions and usage example](/docs/integrations/text_embedding/cloudflare_workersai).
|
||||
|
||||
```python
|
||||
from langchain_community.embeddings.cloudflare_workersai import CloudflareWorkersAIEmbeddings
|
||||
```
|
||||
|
||||
32
docs/docs/integrations/providers/galaxia.mdx
Normal file
32
docs/docs/integrations/providers/galaxia.mdx
Normal file
@@ -0,0 +1,32 @@
|
||||
# Smabbler
|
||||
> Smabbler’s graph-powered platform boosts AI development by transforming data into a structured knowledge foundation.
|
||||
|
||||
# Galaxia
|
||||
|
||||
> Galaxia Knowledge Base is an integrated knowledge base and retrieval mechanism for RAG. In contrast to standard solutions, it is based on Knowledge Graphs built using symbolic NLP and Knowledge Representation techniques. The provided texts are analysed and transformed into graphs containing textual, linguistic and semantic information. This rich structure allows for retrieval based on semantic information, rather than on vector similarity/distance.
|
||||
|
||||
Implementing RAG using Galaxia involves first uploading your files to [Galaxia](https://beta.cloud.smabbler.com/home), analyzing them there and then building a model (knowledge graph). When the model is built, you can use `GalaxiaRetriever` to connect to the API and start retrieving.
|
||||
|
||||
More information: [docs](https://smabbler.gitbook.io/smabbler)
|
||||
|
||||
## Installation
|
||||
```bash
|
||||
pip install langchain-galaxia-retriever
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```python
|
||||
from langchain_galaxia_retriever.retriever import GalaxiaRetriever
|
||||
|
||||
gr = GalaxiaRetriever(
|
||||
api_url="beta.api.smabbler.com",
|
||||
api_key="<key>",
|
||||
knowledge_base_id="<knowledge_base_id>",
|
||||
n_retries=10,
|
||||
wait_time=5,
|
||||
)
|
||||
|
||||
result = gr.invoke('<test question>')
|
||||
print(result)
```
|
||||
|
||||
165
docs/docs/integrations/providers/langfuse.mdx
Normal file
165
docs/docs/integrations/providers/langfuse.mdx
Normal file
@@ -0,0 +1,165 @@
|
||||
# Langfuse 🪢
|
||||
|
||||
> **What is Langfuse?** [Langfuse](https://langfuse.com) is an open source LLM engineering platform that helps teams trace API calls, monitor performance, and debug issues in their AI applications.
|
||||
|
||||
## Tracing LangChain
|
||||
|
||||
[Langfuse Tracing](https://langfuse.com/docs/tracing) integrates with Langchain using Langchain Callbacks ([Python](https://python.langchain.com/docs/how_to/#callbacks), [JS](https://js.langchain.com/docs/how_to/#callbacks)). Thereby, the Langfuse SDK automatically creates a nested trace for every run of your Langchain applications. This allows you to log, analyze and debug your LangChain application.
|
||||
|
||||
You can configure the integration via (1) constructor arguments or (2) environment variables. Get your Langfuse credentials by signing up at [cloud.langfuse.com](https://cloud.langfuse.com) or [self-hosting Langfuse](https://langfuse.com/self-hosting).
|
||||
|
||||
### Constructor arguments
|
||||
|
||||
```bash
|
||||
pip install langfuse
|
||||
```
|
||||
|
||||
```python
|
||||
# Initialize Langfuse handler
|
||||
from langfuse.callback import CallbackHandler
|
||||
langfuse_handler = CallbackHandler(
|
||||
secret_key="sk-lf-...",
|
||||
public_key="pk-lf-...",
|
||||
host="https://cloud.langfuse.com", # 🇪🇺 EU region
|
||||
# host="https://us.cloud.langfuse.com", # 🇺🇸 US region
|
||||
)
|
||||
|
||||
# Your Langchain code
|
||||
|
||||
# Add Langfuse handler as callback (classic and LCEL)
|
||||
chain.invoke({"input": "<user_input>"}, config={"callbacks": [langfuse_handler]})
|
||||
```
|
||||
|
||||
### Environment variables
|
||||
|
||||
```bash filename=".env"
|
||||
LANGFUSE_SECRET_KEY="sk-lf-..."
|
||||
LANGFUSE_PUBLIC_KEY="pk-lf-..."
|
||||
# 🇪🇺 EU region
|
||||
LANGFUSE_HOST="https://cloud.langfuse.com"
|
||||
# 🇺🇸 US region
|
||||
# LANGFUSE_HOST="https://us.cloud.langfuse.com"
|
||||
```
|
||||
|
||||
```python
|
||||
# Initialize Langfuse handler
|
||||
from langfuse.callback import CallbackHandler
|
||||
langfuse_handler = CallbackHandler()
|
||||
|
||||
# Your Langchain code
|
||||
|
||||
# Add Langfuse handler as callback (classic and LCEL)
|
||||
chain.invoke({"input": "<user_input>"}, config={"callbacks": [langfuse_handler]})
|
||||
```
|
||||
|
||||
To see how to use this integration together with other Langfuse features, check out [this end-to-end example](https://langfuse.com/docs/integrations/langchain/example-python).
|
||||
|
||||
## Tracing LangGraph
|
||||
|
||||
This part demonstrates how [Langfuse](https://langfuse.com/docs) helps to debug, analyze, and iterate on your LangGraph application using the [LangChain integration](https://langfuse.com/docs/integrations/langchain/tracing).
|
||||
|
||||
### Initialize Langfuse
|
||||
|
||||
**Note:** You need at least Python 3.11 ([GitHub Issue](https://github.com/langfuse/langfuse/issues/1926)).
|
||||
|
||||
Initialize the Langfuse client with your [API keys](https://langfuse.com/faq/all/where-are-langfuse-api-keys) from the project settings in the Langfuse UI and add them to your environment.
|
||||
|
||||
|
||||
```python
|
||||
%pip install langfuse
|
||||
%pip install langchain langgraph langchain_openai langchain_community
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
# get keys for your project from https://cloud.langfuse.com
|
||||
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-***"
|
||||
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-***"
|
||||
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # for EU data region
|
||||
# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # for US data region
|
||||
|
||||
# your openai key
|
||||
os.environ["OPENAI_API_KEY"] = "***"
|
||||
```
|
||||
|
||||
### Simple chat app with LangGraph
|
||||
|
||||
**What we will do in this section:**
|
||||
|
||||
* Build a support chatbot in LangGraph that can answer common questions
|
||||
* Tracing the chatbot's input and output using Langfuse
|
||||
|
||||
We will start with a basic chatbot and build a more advanced multi agent setup in the next section, introducing key LangGraph concepts along the way.
|
||||
|
||||
#### Create Agent
|
||||
|
||||
Start by creating a `StateGraph`. A `StateGraph` object defines our chatbot's structure as a state machine. We will add nodes to represent the LLM and functions the chatbot can call, and edges to specify how the bot transitions between these functions.
|
||||
|
||||
|
||||
```python
|
||||
from typing import Annotated
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain_core.messages import HumanMessage
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from langgraph.graph import StateGraph
|
||||
from langgraph.graph.message import add_messages
|
||||
|
||||
class State(TypedDict):
|
||||
# Messages have the type "list". The `add_messages` function in the annotation defines how this state key should be updated
|
||||
# (in this case, it appends messages to the list, rather than overwriting them)
|
||||
messages: Annotated[list, add_messages]
|
||||
|
||||
graph_builder = StateGraph(State)
|
||||
|
||||
llm = ChatOpenAI(model = "gpt-4o", temperature = 0.2)
|
||||
|
||||
# The chatbot node function takes the current State as input and returns an updated messages list. This is the basic pattern for all LangGraph node functions.
|
||||
def chatbot(state: State):
|
||||
return {"messages": [llm.invoke(state["messages"])]}
|
||||
|
||||
# Add a "chatbot" node. Nodes represent units of work. They are typically regular python functions.
|
||||
graph_builder.add_node("chatbot", chatbot)
|
||||
|
||||
# Add an entry point. This tells our graph where to start its work each time we run it.
|
||||
graph_builder.set_entry_point("chatbot")
|
||||
|
||||
# Set a finish point. This instructs the graph "any time this node is run, you can exit."
|
||||
graph_builder.set_finish_point("chatbot")
|
||||
|
||||
# To be able to run our graph, call "compile()" on the graph builder. This creates a "CompiledGraph" we can invoke on our state.
|
||||
graph = graph_builder.compile()
|
||||
```
|
||||
|
||||
#### Add Langfuse as callback to the invocation
|
||||
|
||||
Now we will add the [Langfuse callback handler for LangChain](https://langfuse.com/docs/integrations/langchain/tracing) to trace the steps of our application: `config={"callbacks": [langfuse_handler]}`
|
||||
|
||||
|
||||
```python
|
||||
from langfuse.callback import CallbackHandler
|
||||
|
||||
# Initialize Langfuse CallbackHandler for Langchain (tracing)
|
||||
langfuse_handler = CallbackHandler()
|
||||
|
||||
for s in graph.stream({"messages": [HumanMessage(content = "What is Langfuse?")]},
|
||||
config={"callbacks": [langfuse_handler]}):
|
||||
print(s)
|
||||
```
|
||||
|
||||
```
|
||||
{'chatbot': {'messages': [AIMessage(content='Langfuse is a tool designed to help developers monitor and observe the performance of their Large Language Model (LLM) applications. It provides detailed insights into how these applications are functioning, allowing for better debugging, optimization, and overall management. Langfuse offers features such as tracking key metrics, visualizing data, and identifying potential issues in real-time, making it easier for developers to maintain and improve their LLM-based solutions.', response_metadata={'token_usage': {'completion_tokens': 86, 'prompt_tokens': 13, 'total_tokens': 99}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_400f27fa1f', 'finish_reason': 'stop', 'logprobs': None}, id='run-9a0c97cb-ccfe-463e-902c-5a5900b796b4-0', usage_metadata={'input_tokens': 13, 'output_tokens': 86, 'total_tokens': 99})]}}
|
||||
```
|
||||
|
||||
|
||||
#### View traces in Langfuse
|
||||
|
||||
Example trace in Langfuse: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/d109e148-d188-4d6e-823f-aac0864afbab
|
||||
|
||||

|
||||
|
||||
- Check out the [full notebook](https://langfuse.com/docs/integrations/langchain/example-python-langgraph) to see more examples.
|
||||
- To learn how to evaluate the performance of your LangGraph application, check out the [LangGraph evaluation guide](https://langfuse.com/docs/integrations/langchain/example-langgraph-agents).
|
||||
@@ -10,7 +10,7 @@ pip install langchain-litellm
|
||||
|
||||
## Chat Models
|
||||
```python
|
||||
from langchain_litellm.chat_models import ChatLiteLLM
|
||||
from langchain_litellm import ChatLiteLLM
|
||||
```
|
||||
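
Instantiation then follows the chat integration notebook (the model name is copied from that example):

```python
from langchain_litellm import ChatLiteLLM

llm = ChatLiteLLM(model="gpt-3.5-turbo")
llm.invoke("Hello!")
```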
See more detail in the guide [here](/docs/integrations/chat/litellm).
|
||||
|
||||
|
||||
@@ -59,7 +59,7 @@ with the default values of "service-name = mymaster" and "db-number = 0" if not
|
||||
The service-name is the redis server monitoring group name as configured within the Sentinel.
|
||||
|
||||
The current url format limits the connection string to one sentinel host only (no list can be given) and
|
||||
booth Redis server and sentinel must have the same password set (if used).
|
||||
both Redis server and sentinel must have the same password set (if used).
|
||||
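
For example, a single-sentinel connection string following this format might look like the sketch below; the host, port and password are placeholders, and the trailing path segments are the service-name and db-number described above:

```python
# One sentinel host, monitoring group "mymaster", Redis logical database 0.
redis_url = "redis+sentinel://:s3cret@sentinel-host:26379/mymaster/0"
```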
|
||||
#### Redis Cluster connection url
|
||||
|
||||
|
||||
62
docs/docs/integrations/providers/singlestore.ipynb
Normal file
62
docs/docs/integrations/providers/singlestore.ipynb
Normal file
@@ -0,0 +1,62 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStore Integration\n",
|
||||
"\n",
|
||||
"[SingleStore](https://singlestore.com/) is a high-performance, distributed SQL database designed to excel in both [cloud](https://www.singlestore.com/cloud/) and on-premises environments. It offers a versatile feature set, seamless deployment options, and exceptional performance.\n",
|
||||
"\n",
|
||||
"This integration provides the following components to leverage SingleStore's capabilities:\n",
|
||||
"\n",
|
||||
"- **`SingleStoreLoader`**: Load documents directly from a SingleStore database table.\n",
|
||||
"- **`SingleStoreSemanticCache`**: Use SingleStore as a semantic cache for efficient storage and retrieval of embeddings.\n",
|
||||
"- **`SingleStoreChatMessageHistory`**: Store and retrieve chat message history in SingleStore.\n",
|
||||
"- **`SingleStoreVectorStore`**: Store document embeddings and perform fast vector and full-text searches.\n",
|
||||
"\n",
|
||||
"These components enable efficient document storage, embedding management, and advanced search capabilities, combining full-text and vector-based search for fast and accurate queries."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "y8ku6X96sebl"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_singlestore import (\n",
|
||||
" SingleStoreChatMessageHistory,\n",
|
||||
" SingleStoreLoader,\n",
|
||||
" SingleStoreSemanticCache,\n",
|
||||
" SingleStoreVectorStore,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
# SingleStoreDB
|
||||
|
||||
>[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
There are several ways to establish a [connection](https://singlestoredb-python.labs.singlestore.com/generated/singlestoredb.connect.html) to the database. You can either set up environment variables or pass named parameters to the `SingleStoreDB constructor`.
|
||||
Alternatively, you may provide these parameters to the `from_documents` and `from_texts` methods.
|
||||
|
||||
```bash
|
||||
pip install singlestoredb
|
||||
```
|
||||
|
||||
## Vector Store
|
||||
|
||||
See a [usage example](/docs/integrations/vectorstores/singlestoredb).
|
||||
|
||||
```python
|
||||
from langchain_community.vectorstores import SingleStoreDB
|
||||
```
|
||||
|
||||
## Memory
|
||||
|
||||
See a [usage example](/docs/integrations/memory/singlestoredb_chat_message_history).
|
||||
|
||||
```python
|
||||
from langchain.memory import SingleStoreDBChatMessageHistory
|
||||
```
|
||||
@@ -14,7 +14,7 @@
|
||||
"3. The [Boomerang](https://vectara.com/how-boomerang-takes-retrieval-augmented-generation-to-the-next-level-via-grounded-generation/) embeddings model.\n",
|
||||
"4. Its own internal vector database where text chunks and embedding vectors are stored.\n",
|
||||
"5. A query service that automatically encodes the query into embedding, and retrieves the most relevant text segments, including support for [Hybrid Search](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) as well as multiple reranking options such as the [multi-lingual relevance reranker](https://www.vectara.com/blog/deep-dive-into-vectara-multilingual-reranker-v1-state-of-the-art-reranker-across-100-languages), [MMR](https://vectara.com/get-diverse-results-and-comprehensive-summaries-with-vectaras-mmr-reranker/), [UDF reranker](https://www.vectara.com/blog/rag-with-user-defined-functions-based-reranking). \n",
|
||||
"6. An LLM to for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"6. An LLM for creating a [generative summary](https://docs.vectara.com/docs/learn/grounded-generation/grounded-generation-overview), based on the retrieved documents (context), including citations.\n",
|
||||
"\n",
|
||||
"For more information:\n",
|
||||
"- [Documentation](https://docs.vectara.com/docs/)\n",
|
||||
|
||||
23
docs/docs/integrations/providers/ydb.mdx
Normal file
@@ -0,0 +1,23 @@
|
||||
# YDB
|
||||
|
||||
All functionality related to YDB.
|
||||
|
||||
> [YDB](https://ydb.tech/) is a versatile open source Distributed SQL Database that combines
|
||||
> high availability and scalability with strong consistency and ACID transactions.
|
||||
> It accommodates transactional (OLTP), analytical (OLAP), and streaming workloads simultaneously.
|
||||
|
||||
## Installation and Setup
|
||||
|
||||
```bash
|
||||
pip install langchain-ydb
|
||||
```
|
||||
|
||||
## Vector Store
|
||||
|
||||
To import YDB vector store:
|
||||
|
||||
```python
|
||||
from langchain_ydb.vectorstores import YDB
|
||||
```
|
||||
|
||||
For a more detailed walkthrough of the YDB vector store, see [this notebook](/docs/integrations/vectorstores/ydb).
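A minimal usage sketch, hedged: it assumes the `YDB` class follows the standard LangChain `VectorStore` interface (`add_texts`, `similarity_search`) and that the constructor accepts an embedding model directly; the exact constructor signature may differ, so consult the linked notebook for the real parameters.

```python
from langchain_openai import OpenAIEmbeddings
from langchain_ydb.vectorstores import YDB

embeddings = OpenAIEmbeddings()

# Assumed constructor; see the linked notebook for the actual arguments.
vector_store = YDB(embeddings)

vector_store.add_texts(["YDB is a distributed SQL database."])
print(vector_store.similarity_search("distributed SQL", k=1))
```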
|
||||
@@ -40,11 +40,21 @@
|
||||
"\n",
|
||||
"+ An existing index with vector fields. There are several ways to create one, including using the [vector store module](../vectorstores/azuresearch.ipynb). Or, [try the Azure AI Search REST APIs](https://learn.microsoft.com/azure/search/search-get-started-vector).\n",
|
||||
"\n",
|
||||
"+ An API key. API keys are generated when you create the search service. If you're just querying an index, you can use the query API key, otherwise use an admin API key. See [Find your API keys](https://learn.microsoft.com/azure/search/search-security-api-keys?tabs=rest-use%2Cportal-find%2Cportal-query#find-existing-keys) for details.\n",
|
||||
"+ An API key or Azure AD Token.\n",
|
||||
" + API keys are generated when you create the search service. If you're just querying an index, you can use the query API key, otherwise use an admin API key. See [Find your API keys](https://learn.microsoft.com/azure/search/search-security-api-keys?tabs=rest-use%2Cportal-find%2Cportal-query#find-existing-keys) for details.\n",
|
||||
" + Azure AD Token can be used with Azure Managed Identity. See [Connect your app to Azure AI Search using identities](https://learn.microsoft.com/en-us/azure/search/keyless-connections?tabs=python%2Cazure-cli) for details.\n",
|
||||
"\n",
|
||||
"We can then set the search service name, index name, and API key as environment variables (alternatively, you can pass them as arguments to `AzureAISearchRetriever`). The search index provides the searchable content."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2af9655d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With an API Key"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -59,6 +69,28 @@
|
||||
"os.environ[\"AZURE_AI_SEARCH_API_KEY\"] = \"<YOUR_API_KEY>\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "35ee4780",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With an Azure AD Token"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7f33263c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"AZURE_AI_SEARCH_SERVICE_NAME\"] = \"<YOUR_SEARCH_SERVICE_NAME>\"\n",
|
||||
"os.environ[\"AZURE_AI_SEARCH_INDEX_NAME\"] = \"<YOUR_SEARCH_INDEX_NAME>\"\n",
|
||||
"os.environ[\"AZURE_AI_SEARCH_AD_TOKEN\"] = \"<YOUR_AZURE_AD_TOKEN>\""
|
||||
]
|
||||
},
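With either set of environment variables in place, the retriever can then be constructed and queried. A minimal sketch, assuming `AzureAISearchRetriever` from `langchain_community`; the `content_key` and `top_k` values here are illustrative.

```python
from langchain_community.retrievers import AzureAISearchRetriever

# Picks up the AZURE_AI_SEARCH_* environment variables set above.
retriever = AzureAISearchRetriever(content_key="content", top_k=3)

docs = retriever.invoke("what is Azure AI Search?")
```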
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3e635218-8634-4f39-abc5-39e319eeb136",
|
||||
|
||||
213
docs/docs/integrations/retrievers/galaxia-retriever.ipynb
Normal file
@@ -0,0 +1,213 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "2af1fec5-4ca6-4167-8ee1-13314aac3258",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Galaxia\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1d7d6cbc-4373-4fb5-94dd-acd610165452",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Galaxia Retriever\n",
|
||||
"\n",
|
||||
"Galaxia is GraphRAG solution, which automates document processing, knowledge base (Graph Language Model) creation and retrieval:\n",
|
||||
"[galaxia-rag](https://smabbler.gitbook.io/smabbler/api-rag/smabblers-api-rag)\n",
|
||||
"\n",
|
||||
"To use Galaxia first upload your texts and create a Graph Language Model here: [smabbler-cloud](https://beta.cloud.smabbler.com)\n",
|
||||
"\n",
|
||||
"After the model is built and activated, you will be able to use this integration to retrieve what you need.\n",
|
||||
"\n",
|
||||
"The module repository is located here: [github](https://github.com/rrozanski-smabbler/galaxia-langchain)\n",
|
||||
"\n",
|
||||
"### Integration details\n",
|
||||
"| Retriever | Self-host | Cloud offering | Package |\n",
|
||||
"| :--- | :--- | :---: | :---: |\n",
|
||||
"[Galaxia Retriever](https://github.com/rrozanski-smabbler/galaxia-langchain) | ❌ | ✅ | __langchain-galaxia-retriever__ |"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "82fa1c05-c205-4429-a74c-e6c81c4e8611",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"Before you can retrieve anything you need to create your Graph Language Model here: [smabbler-cloud](https://beta.cloud.smabbler.com)\n",
|
||||
"\n",
|
||||
"following these 3 simple steps: [rag-instruction](https://smabbler.gitbook.io/smabbler/api-rag/build-rag-model-in-3-steps)\n",
|
||||
"\n",
|
||||
"Don't forget to activate the model after building it!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "91897867-eb39-4c3b-8df8-5427043ecdcd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Installation\n",
|
||||
"The retriever is implemented in the following package: [pypi](https://pypi.org/project/langchain-galaxia-retriever/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ceca36f2-013c-4b28-81fe-8808d0cf6419",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-galaxia-retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "019e0e50-5e66-440b-9cf1-d21b4009bf13",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Instantiation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c7188217-4b26-4201-b15a-b7a5f263f815",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_galaxia_retriever.retriever import GalaxiaRetriever\n",
|
||||
"\n",
|
||||
"gr = GalaxiaRetriever(\n",
|
||||
" api_url=\"beta.api.smabbler.com\",\n",
|
||||
" api_key=\"<key>\", # you can find it here: https://beta.cloud.smabbler.com/user/account\n",
|
||||
" knowledge_base_id=\"<knowledge_base_id>\", # you can find it in https://beta.cloud.smabbler.com , in the model table\n",
|
||||
" n_retries=10,\n",
|
||||
" wait_time=5,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "02d288a5-4f76-472e-9a60-eea8e6b8dc7a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5f79e03f-77a6-4eb6-b41d-f3da2f897654",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = gr.invoke(\"<test question>\")\n",
|
||||
"print(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ffb2a595-a901-477a-a374-efd091bc1c9a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use within a chain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9c2e2394-ca33-47be-a851-551b4216daea",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ed8699d6-d65d-40ea-8c58-8d809cc512cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"\"\"Answer the question based only on the context provided.\n",
|
||||
"\n",
|
||||
"Context: {context}\n",
|
||||
"\n",
|
||||
"Question: {question}\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def format_docs(docs):\n",
|
||||
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chain = (\n",
|
||||
" {\"context\": gr | format_docs, \"question\": RunnablePassthrough()}\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f9b944d7-8800-4926-b1ce-fcdc52ecda1c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.invoke(\"<test question>\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "11b5c9a5-0a66-415f-98f8-f12080cad30a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For more information about Galaxia Retriever check its implementation on github [github](https://github.com/rrozanski-smabbler/galaxia-langchain)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,121 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ab66dd43",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStoreDB\n",
|
||||
"\n",
|
||||
">[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. \n",
|
||||
"\n",
|
||||
"\n",
|
||||
"This notebook shows how to use a retriever that uses `SingleStoreDB`.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "51b49135-a61a-49e8-869d-7c1d76794cd7",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Establishing a connection to the database is facilitated through the singlestoredb Python connector.\n",
|
||||
"# Please ensure that this connector is installed in your working environment.\n",
|
||||
"%pip install --upgrade --quiet singlestoredb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "aaf80e7f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Retriever from vector store"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bcb3c8c2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# We want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\n",
|
||||
"if \"OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.vectorstores import SingleStoreDB\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"from langchain_text_splitters import CharacterTextSplitter\n",
|
||||
"\n",
|
||||
"loader = TextLoader(\"../../how_to/state_of_the_union.txt\")\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"\n",
|
||||
"# Setup connection url as environment variable\n",
|
||||
"os.environ[\"SINGLESTOREDB_URL\"] = \"root:pass@localhost:3306/db\"\n",
|
||||
"\n",
|
||||
"# Load documents to the store\n",
|
||||
"docsearch = SingleStoreDB.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" table_name=\"notebook\", # use table with a custom name\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# create retriever from the vector store\n",
|
||||
"retriever = docsearch.as_retriever(search_kwargs={\"k\": 2})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fc0915db",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Search with retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b605284d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"result = retriever.invoke(\"What did the president say about Ketanji Brown Jackson\")\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -47,7 +47,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings.cloudflare_workersai import (\n",
|
||||
"from langchain_cloudflare.embeddings import (\n",
|
||||
" CloudflareWorkersAIEmbeddings,\n",
|
||||
")"
|
||||
]
|
||||
|
||||
@@ -14,10 +14,12 @@
|
||||
"## Installation and setup\n",
|
||||
"\n",
|
||||
"To use this tool, you must first set as environment variables:\n",
|
||||
" JIRA_API_TOKEN\n",
|
||||
" JIRA_USERNAME\n",
|
||||
" JIRA_INSTANCE_URL\n",
|
||||
" JIRA_CLOUD"
|
||||
" JIRA_INSTANCE_URL,\n",
|
||||
" JIRA_CLOUD\n",
|
||||
"\n",
|
||||
"You have the choice between two authentication methods:\n",
|
||||
"- API token authentication: set the JIRA_API_TOKEN (and JIRA_USERNAME if needed) environment variables\n",
|
||||
"- OAuth2.0 authentication: set the JIRA_OAUTH2 environment variable as a dict having as fields \"client_id\" and \"token\" which is a dict containing at least \"access_token\" and \"token_type\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -79,6 +81,12 @@
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3c925f1468696e4c",
|
||||
"metadata": {},
|
||||
"source": "For authentication with API token"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
@@ -109,6 +117,27 @@
|
||||
"os.environ[\"JIRA_CLOUD\"] = \"True\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "325ea81fb416aac6",
|
||||
"metadata": {},
|
||||
"source": "For authentication with a Oauth2.0"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "917e83e3a764d91a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"JIRA_OAUTH2\"] = (\n",
|
||||
" '{\"client_id\": \"123\", \"token\": {\"access_token\": \"abc\", \"token_type\": \"bearer\"}}'\n",
|
||||
")\n",
|
||||
"os.environ[\"JIRA_INSTANCE_URL\"] = \"https://jira.atlassian.com\"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = \"xyz\"\n",
|
||||
"os.environ[\"JIRA_CLOUD\"] = \"True\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
@@ -215,15 +244,15 @@
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to create an issue in project PW\n",
|
||||
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
|
||||
"\u001B[32;1m\u001B[1;3m I need to create an issue in project PW\n",
|
||||
"Action: Create Issue\n",
|
||||
"Action Input: {\"summary\": \"Make more fried rice\", \"description\": \"Reminder to make more fried rice\", \"issuetype\": {\"name\": \"Task\"}, \"priority\": {\"name\": \"Low\"}, \"project\": {\"key\": \"PW\"}}\u001b[0m\n",
|
||||
"Observation: \u001b[38;5;200m\u001b[1;3mNone\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: A new issue has been created in project PW with the summary \"Make more fried rice\" and description \"Reminder to make more fried rice\".\u001b[0m\n",
|
||||
"Action Input: {\"summary\": \"Make more fried rice\", \"description\": \"Reminder to make more fried rice\", \"issuetype\": {\"name\": \"Task\"}, \"priority\": {\"name\": \"Low\"}, \"project\": {\"key\": \"PW\"}}\u001B[0m\n",
|
||||
"Observation: \u001B[38;5;200m\u001B[1;3mNone\u001B[0m\n",
|
||||
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
|
||||
"Final Answer: A new issue has been created in project PW with the summary \"Make more fried rice\" and description \"Reminder to make more fried rice\".\u001B[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
"\u001B[1m> Finished chain.\u001B[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -98,7 +98,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "310d21b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -125,7 +125,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": null,
|
||||
"id": "ccfb4159-34ac-4816-a8f0-795c5442c0b2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -148,16 +148,16 @@
|
||||
" \"TABLEAU_JWT_SECRET\"\n",
|
||||
") # a JWT secret ID (obtained through Tableau's admin UI)\n",
|
||||
"tableau_api_version = \"3.21\" # the current Tableau REST API Version\n",
|
||||
"tableau_user = \"joe.constantino@salesforce.com\" # replace with the username querying the target Tableau Data Source\n",
|
||||
"tableau_user = \"joe.constantino@salesforce.com\" # enter the username querying the target Tableau Data Source\n",
|
||||
"\n",
|
||||
"# For this cookbook we are connecting to the Superstore dataset that comes by default with every Tableau server\n",
|
||||
"datasource_luid = (\n",
|
||||
" \"0965e61b-a072-43cf-994c-8c6cf526940d\" # the target data source for this Tool\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model_provider = \"openai\" # the name of the model provider you are using for your Agent\n",
|
||||
"# Add variables to control LLM models for the Agent and Tools\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] # set an your model API key as an environment variable\n",
|
||||
"tooling_llm_model = \"gpt-4o\""
|
||||
"tooling_llm_model = \"gpt-4o-mini\""
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -178,12 +178,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 6,
|
||||
"id": "72ee3eca",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initalize simple_datasource_qa for querying Tableau Datasources through VDS\n",
|
||||
"# Initialize simple_datasource_qa for querying Tableau Datasources through VDS\n",
|
||||
"analyze_datasource = initialize_simple_datasource_qa(\n",
|
||||
" domain=tableau_server,\n",
|
||||
" site=tableau_site,\n",
|
||||
@@ -194,6 +194,7 @@
|
||||
" tableau_user=tableau_user,\n",
|
||||
" datasource_luid=datasource_luid,\n",
|
||||
" tooling_llm_model=tooling_llm_model,\n",
|
||||
" model_provider=model_provider,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# load the List of Tools to be used by the Agent. In this case we will just load our data source Q&A tool.\n",
|
||||
@@ -211,47 +212,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": null,
|
||||
"id": "06a1d3f7-79a8-452e-b37e-9070d15445b0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/markdown": [
|
||||
"Here are the results for the states with the highest sales and profits based on the data queried:\n",
|
||||
"\n",
|
||||
"### States with the Most Sales\n",
|
||||
"1. **California**: $457,687.63\n",
|
||||
"2. **New York**: $310,876.27\n",
|
||||
"3. **Texas**: $170,188.05\n",
|
||||
"4. **Washington**: $138,641.27\n",
|
||||
"5. **Pennsylvania**: $116,511.91\n",
|
||||
"\n",
|
||||
"### States with the Most Profit\n",
|
||||
"1. **California**: $76,381.39\n",
|
||||
"2. **New York**: $74,038.55\n",
|
||||
"3. **Washington**: $33,402.65\n",
|
||||
"4. **Michigan**: $24,463.19\n",
|
||||
"5. **Virginia**: $18,597.95\n",
|
||||
"\n",
|
||||
"### Comparison\n",
|
||||
"- **California** and **New York** are the only states that appear in both lists, indicating they are the top sellers and also generate the most profit.\n",
|
||||
"- **Texas**, while having the third highest sales, does not rank in the top five for profit, showing a potential issue with profitability despite high sales.\n",
|
||||
"\n",
|
||||
"This analysis suggests that high sales do not always correlate with high profits, as seen with Texas."
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.core.display.Markdown object>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4o\", temperature=0)\n",
|
||||
"\n",
|
||||
"tableauAgent = create_react_agent(model, tools)\n",
|
||||
"\n",
|
||||
@@ -261,13 +229,13 @@
|
||||
" \"messages\": [\n",
|
||||
" (\n",
|
||||
" \"human\",\n",
|
||||
" \"which states sell the most? Are those the same states with the most profits?\",\n",
|
||||
" \"what's going on with table sales?\",\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"messages\n",
|
||||
"# display(Markdown(messages['messages'][4].content)) #display a nicely formatted answer for successful generations"
|
||||
"# display(Markdown(messages['messages'][3].content)) #display a nicely formatted answer for successful generations"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -293,9 +261,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python (package_test_env)",
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "package_test_env"
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! docker run -d -p 8123:8123 -p9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:23.4.2.11"
|
||||
"! docker run -d -p 8123:8123 -p9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:24.7.6.8"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
1852
docs/docs/integrations/vectorstores/cloudflare_vectorize.ipynb
Normal file
File diff suppressed because it is too large
373
docs/docs/integrations/vectorstores/opengauss.ipynb
Normal file
@@ -0,0 +1,373 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "1957f5cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: openGauss\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ef1f0986",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# openGauss VectorStore\n",
|
||||
"\n",
|
||||
"This notebook covers how to get started with the openGauss VectorStore. [openGauss](https://opengauss.org/en/) is a high-performance relational database with native vector storage and retrieval capabilities. This integration enables ACID-compliant vector operations within LangChain applications, combining traditional SQL functionality with modern AI-driven similarity search.\n",
|
||||
" vector store."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36fdc060",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"### Launch openGauss Container"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {},
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"```bash\n",
|
||||
"docker run --name opengauss \\\n",
|
||||
" -d \\\n",
|
||||
" -e GS_PASSWORD='MyStrongPass@123' \\\n",
|
||||
" -p 8888:5432 \\\n",
|
||||
" opengauss/opengauss-server:latest\n",
|
||||
"```"
|
||||
],
|
||||
"id": "e006fdc593107ef5"
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a51b3f07b83b8a1d",
|
||||
"metadata": {},
|
||||
"source": "### Install langchain-opengauss"
|
||||
},
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "ad030f666e228cc8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```bash\n",
|
||||
"pip install langchain-opengauss\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4d14f2f5f8ab0df7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**System Requirements**:\n",
|
||||
"- openGauss ≥ 7.0.0\n",
|
||||
"- Python ≥ 3.8\n",
|
||||
"- psycopg2-binary"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9695dee7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"Using your openGauss Credentials"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "93df377e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"import EmbeddingTabs from \"@theme/EmbeddingTabs\";\n",
|
||||
"\n",
|
||||
"<EmbeddingTabs/>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dc37144c-208d-4ab3-9f3a-0407a69fe052",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_opengauss import OpenGauss, OpenGaussSettings\n",
|
||||
"\n",
|
||||
"# Configure with schema validation\n",
|
||||
"config = OpenGaussSettings(\n",
|
||||
" table_name=\"test_langchain\",\n",
|
||||
" embedding_dimension=384,\n",
|
||||
" index_type=\"HNSW\",\n",
|
||||
" distance_strategy=\"COSINE\",\n",
|
||||
")\n",
|
||||
"vector_store = OpenGauss(embedding=embeddings, config=config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac6071d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Manage vector store\n",
|
||||
"\n",
|
||||
"### Add items to vector store\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "17f5efc0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"document_1 = Document(page_content=\"foo\", metadata={\"source\": \"https://example.com\"})\n",
|
||||
"\n",
|
||||
"document_2 = Document(page_content=\"bar\", metadata={\"source\": \"https://example.com\"})\n",
|
||||
"\n",
|
||||
"document_3 = Document(page_content=\"baz\", metadata={\"source\": \"https://example.com\"})\n",
|
||||
"\n",
|
||||
"documents = [document_1, document_2, document_3]\n",
|
||||
"\n",
|
||||
"vector_store.add_documents(documents=documents, ids=[\"1\", \"2\", \"3\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c738c3e0",
|
||||
"metadata": {},
|
||||
"source": "### Update items in vector store\n"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f0aa8b71",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"updated_document = Document(\n",
|
||||
" page_content=\"qux\", metadata={\"source\": \"https://another-example.com\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# If the id is already exist, will update the document\n",
|
||||
"vector_store.add_documents(document_id=\"1\", document=updated_document)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dcf1b905",
|
||||
"metadata": {},
|
||||
"source": "### Delete items from vector store\n"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ef61e188",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vector_store.delete(ids=[\"3\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3620501",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query vector store\n",
|
||||
"\n",
|
||||
"Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent.\n",
|
||||
"\n",
|
||||
"### Query directly\n",
|
||||
"\n",
|
||||
"Performing a simple similarity search can be done as follows:\n",
|
||||
"\n",
|
||||
"- TODO: Edit and then run code cell to generate output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aa0a16fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"results = vector_store.similarity_search(\n",
|
||||
" query=\"thud\", k=1, filter={\"source\": \"https://another-example.com\"}\n",
|
||||
")\n",
|
||||
"for doc in results:\n",
|
||||
" print(f\"* {doc.page_content} [{doc.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3ed9d733",
|
||||
"metadata": {},
|
||||
"source": "If you want to execute a similarity search and receive the corresponding scores you can run:\n"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5efd2eaa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"results = vector_store.similarity_search_with_score(\n",
|
||||
" query=\"thud\", k=1, filter={\"source\": \"https://example.com\"}\n",
|
||||
")\n",
|
||||
"for doc, score in results:\n",
|
||||
" print(f\"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0c235cdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Query by turning into retriever\n",
|
||||
"\n",
|
||||
"You can also transform the vector store into a retriever for easier usage in your chains.\n",
|
||||
"\n",
|
||||
"- TODO: Edit and then run code cell to generate output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f3460093",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever(search_type=\"mmr\", search_kwargs={\"k\": 1})\n",
|
||||
"retriever.invoke(\"thud\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "901c75dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage for retrieval-augmented generation\n",
|
||||
"\n",
|
||||
"For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n",
|
||||
"\n",
|
||||
"- [Tutorials](/docs/tutorials/)\n",
|
||||
"- [How-to: Question and answer with RAG](https://python.langchain.com/docs/how_to/#qa-with-rag)\n",
|
||||
"- [Retrieval conceptual docs](https://python.langchain.com/docs/concepts/retrieval/)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "069f1b5f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Configuration\n",
|
||||
"\n",
|
||||
"### Connection Settings\n",
|
||||
"| Parameter | Default | Description |\n",
|
||||
"|---------------------|-------------------------|--------------------------------------------------------|\n",
|
||||
"| `host` | localhost | Database server address |\n",
|
||||
"| `port` | 8888 | Database connection port |\n",
|
||||
"| `user` | gaussdb | Database username |\n",
|
||||
"| `password` | - | Complex password string |\n",
|
||||
"| `database` | postgres | Default database name |\n",
|
||||
"| `min_connections` | 1 | Connection pool minimum size |\n",
|
||||
"| `max_connections` | 5 | Connection pool maximum size |\n",
|
||||
"| `table_name` | langchain_docs | Name of the table for storing vector data and metadata |\n",
|
||||
"| `index_type` | IndexType.HNSW |Vector index algorithm type. Options: HNSW or IVFFLAT\\nDefault is HNSW.|\n",
|
||||
"| `vector_type` | VectorType.vector |Type of vector representation to use. Default is Vector.|\n",
|
||||
"| `distance_strategy` | DistanceStrategy.COSINE |Vector similarity metric to use for retrieval. Options: euclidean (L2 distance), cosine (angular distance, ideal for text embeddings), manhattan (L1 distance for sparse data), negative_inner_product (dot product for normalized vectors).\\n Default is cosine.|\n",
|
||||
"|`embedding_dimension`| 1536 |Dimensionality of the vector embeddings.|\n",
|
||||
"\n",
|
||||
"### Supported Combinations\n",
|
||||
"\n",
|
||||
"| Vector Type | Dimensions | Index Types | Supported Distance Strategies |\n",
|
||||
"|-------------|------------|--------------|---------------------------------------|\n",
|
||||
"| vector | ≤2000 | HNSW/IVFFLAT | COSINE/EUCLIDEAN/MANHATTAN/INNER_PROD |\n",
|
||||
"\n"
|
||||
]
|
||||
},
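Putting the connection and index settings together, a hedged sketch of a fuller configuration. The host, port, and credentials below are placeholders matching the Docker container started earlier, and `embeddings` is assumed to be any LangChain `Embeddings` instance.

```python
from langchain_opengauss import OpenGauss, OpenGaussSettings

config = OpenGaussSettings(
    host="localhost",
    port=8888,  # mapped from the container's internal 5432 above
    user="gaussdb",
    password="MyStrongPass@123",
    database="postgres",
    table_name="langchain_docs",
    embedding_dimension=384,
    index_type="HNSW",
    distance_strategy="COSINE",
    min_connections=1,
    max_connections=5,
)
vector_store = OpenGauss(embedding=embeddings, config=config)
```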
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6a7b7b7c4f5a03e1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Performance Optimization\n",
|
||||
"\n",
|
||||
"### Index Tuning Guidelines\n",
|
||||
"**HNSW Parameters**:\n",
|
||||
"- `m`: 16-100 (balance between recall and memory)\n",
|
||||
"- `ef_construction`: 64-1000 (must be > 2*m)\n",
|
||||
"\n",
|
||||
"**IVFFLAT Recommendations**:\n",
|
||||
"```python\n",
|
||||
"import math\n",
|
||||
"\n",
|
||||
"lists = min(\n",
|
||||
" int(math.sqrt(total_rows)) if total_rows > 1e6 else int(total_rows / 1000),\n",
|
||||
" 2000, # openGauss maximum\n",
|
||||
")\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Connection Pooling\n",
|
||||
"```python\n",
|
||||
"OpenGaussSettings(min_connections=3, max_connections=20)\n",
|
||||
"```\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6b581b499ffed641",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Limitations\n",
|
||||
"- `bit` and `sparsevec` vector types currently in development\n",
|
||||
"- Maximum vector dimensions: 2000 for `vector` type"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8a27244f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all __ModuleName__VectorStore features and configurations head to the API reference: https://python.langchain.com/api_reference/en/latest/vectorstores/opengauss.OpenGuass.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -120,7 +120,7 @@
|
||||
"1. `redis://` - Connection to Redis standalone, unencrypted\n",
|
||||
"2. `rediss://` - Connection to Redis standalone, with TLS encryption\n",
|
||||
"3. `redis+sentinel://` - Connection to Redis server via Redis Sentinel, unencrypted\n",
|
||||
"4. `rediss+sentinel://` - Connection to Redis server via Redis Sentinel, booth connections with TLS encryption\n",
|
||||
"4. `rediss+sentinel://` - Connection to Redis server via Redis Sentinel, both connections with TLS encryption\n",
|
||||
"\n",
|
||||
"More information about additional connection parameters can be found in the [redis-py documentation](https://redis-py.readthedocs.io/en/stable/connections.html)."
|
||||
]
|
||||
@@ -187,7 +187,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": null,
|
||||
"id": "b1b1eb90-5155-44ca-a8a7-b04b02d5e77c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -209,7 +209,7 @@
|
||||
"# connection to redis standalone at localhost, db 0, no password but with TLS support\n",
|
||||
"redis_url = \"rediss://localhost:6379\"\n",
|
||||
"# connection to redis sentinel at localhost and default port, db 0, no password\n",
|
||||
"# but with TLS support for booth Sentinel and Redis server\n",
|
||||
"# but with TLS support for both Sentinel and Redis server\n",
|
||||
"redis_url = \"rediss+sentinel://localhost\""
|
||||
]
|
||||
},
|
||||
@@ -254,7 +254,9 @@
|
||||
"cell_type": "markdown",
|
||||
"id": "7f98392b",
|
||||
"metadata": {},
|
||||
"source": "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
"source": [
|
||||
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -899,7 +901,7 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdin",
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Please enter your OpenAI API key: ········\n"
|
||||
|
||||
527
docs/docs/integrations/vectorstores/singlestore.ipynb
Normal file
@@ -0,0 +1,527 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "1957f5cb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: SingleStore\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ef1f0986",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStoreVectorStore\n",
|
||||
"\n",
|
||||
">[SingleStore](https://singlestore.com/) is a robust, high-performance distributed SQL database solution designed to excel in both [cloud](https://www.singlestore.com/cloud/) and on-premises environments. Boasting a versatile feature set, it offers seamless deployment options while delivering unparalleled performance.\n",
|
||||
"\n",
|
||||
"A standout feature of SingleStore is its advanced support for vector storage and operations, making it an ideal choice for applications requiring intricate AI capabilities such as text similarity matching. With built-in vector functions like [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), SingleStore empowers developers to implement sophisticated algorithms efficiently.\n",
|
||||
"\n",
|
||||
"For developers keen on leveraging vector data within SingleStore, a comprehensive tutorial is available, guiding them through the intricacies of [working with vector data](https://docs.singlestore.com/managed-service/en/developer-resources/functional-extensions/working-with-vector-data.html). This tutorial delves into the Vector Store within SingleStoreDB, showcasing its ability to facilitate searches based on vector similarity. Leveraging vector indexes, queries can be executed with remarkable speed, enabling swift retrieval of relevant data.\n",
|
||||
"\n",
|
||||
"Moreover, SingleStore's Vector Store seamlessly integrates with [full-text indexing based on Lucene](https://docs.singlestore.com/cloud/developer-resources/functional-extensions/working-with-full-text-search/), enabling powerful text similarity searches. Users can filter search results based on selected fields of document metadata objects, enhancing query precision.\n",
|
||||
"\n",
|
||||
"What sets SingleStore apart is its ability to combine vector and full-text searches in various ways, offering flexibility and versatility. Whether prefiltering by text or vector similarity and selecting the most relevant data, or employing a weighted sum approach to compute a final similarity score, developers have multiple options at their disposal.\n",
|
||||
"\n",
|
||||
"In essence, SingleStore provides a comprehensive solution for managing and querying vector data, offering unparalleled performance and flexibility for AI-driven applications.\n",
|
||||
"\n",
|
||||
"| Class | Package | JS support |\n",
|
||||
"| :--- | :--- | :---: |\n",
|
||||
"| SingleStoreVectorStore | langchain_singlestore | ✅ | \n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"For the langchain-community version `SingleStoreDB` (deprecated), see\n",
|
||||
"the [v0.2 documentation](https://python.langchain.com/v0.2/docs/integrations/vectorstores/singlestoredb/).\n",
|
||||
"\n",
|
||||
":::"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36fdc060",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To access SingleStore vector stores you'll need to install the `langchain-singlestore` integration package."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "64e28aa6",
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"%pip install -qU \"langchain-singlestore\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "93df377e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"To initialize `SingleStoreVectorStore`, you need an `Embeddings` object and connection parameters for the SingleStore database.\n",
|
||||
"\n",
|
||||
"### Required Parameters:\n",
|
||||
"- **embedding** (`Embeddings`): A text embedding model.\n",
|
||||
"\n",
|
||||
"### Optional Parameters:\n",
|
||||
"- **distance_strategy** (`DistanceStrategy`): Strategy for calculating vector distances. Defaults to `DOT_PRODUCT`. Options:\n",
|
||||
" - `DOT_PRODUCT`: Computes the scalar product of two vectors.\n",
|
||||
" - `EUCLIDEAN_DISTANCE`: Computes the Euclidean distance between two vectors.\n",
|
||||
"\n",
|
||||
"- **table_name** (`str`): Name of the table. Defaults to `embeddings`.\n",
|
||||
"- **content_field** (`str`): Field for storing content. Defaults to `content`.\n",
|
||||
"- **metadata_field** (`str`): Field for storing metadata. Defaults to `metadata`.\n",
|
||||
"- **vector_field** (`str`): Field for storing vectors. Defaults to `vector`.\n",
|
||||
"- **id_field** (`str`): Field for storing IDs. Defaults to `id`.\n",
|
||||
"\n",
|
||||
"- **use_vector_index** (`bool`): Enables vector indexing (requires SingleStore 8.5+). Defaults to `False`.\n",
|
||||
"- **vector_index_name** (`str`): Name of the vector index. Ignored if `use_vector_index` is `False`.\n",
|
||||
"- **vector_index_options** (`dict`): Options for the vector index. Ignored if `use_vector_index` is `False`.\n",
|
||||
"- **vector_size** (`int`): Size of the vector. Required if `use_vector_index` is `True`.\n",
|
||||
"\n",
|
||||
"- **use_full_text_search** (`bool`): Enables full-text indexing on content. Defaults to `False`.\n",
|
||||
"\n",
|
||||
"### Connection Pool Parameters:\n",
|
||||
"- **pool_size** (`int`): Number of active connections in the pool. Defaults to `5`.\n",
|
||||
"- **max_overflow** (`int`): Maximum connections beyond `pool_size`. Defaults to `10`.\n",
|
||||
"- **timeout** (`float`): Connection timeout in seconds. Defaults to `30`.\n",
|
||||
"\n",
|
||||
"### Database Connection Parameters:\n",
|
||||
"- **host** (`str`): Hostname, IP, or URL for the database.\n",
|
||||
"- **user** (`str`): Database username.\n",
|
||||
"- **password** (`str`): Database password.\n",
|
||||
"- **port** (`int`): Database port. Defaults to `3306`.\n",
|
||||
"- **database** (`str`): Database name.\n",
|
||||
"\n",
|
||||
"### Additional Options:\n",
|
||||
"- **pure_python** (`bool`): Enables pure Python mode.\n",
|
||||
"- **local_infile** (`bool`): Allows local file uploads.\n",
|
||||
"- **charset** (`str`): Character set for string values.\n",
|
||||
"- **ssl_key**, **ssl_cert**, **ssl_ca** (`str`): Paths to SSL files.\n",
|
||||
"- **ssl_disabled** (`bool`): Disables SSL.\n",
|
||||
"- **ssl_verify_cert** (`bool`): Verifies server's certificate.\n",
|
||||
"- **ssl_verify_identity** (`bool`): Verifies server's identity.\n",
|
||||
"- **autocommit** (`bool`): Enables autocommits.\n",
|
||||
"- **results_type** (`str`): Structure of query results (e.g., `tuples`, `dicts`)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dc37144c-208d-4ab3-9f3a-0407a69fe052",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_singlestore.vectorstores import SingleStoreVectorStore\n",
|
||||
"\n",
|
||||
"os.environ[\"SINGLESTOREDB_URL\"] = \"root:pass@localhost:3306/db\"\n",
|
||||
"\n",
|
||||
"vector_store = SingleStoreVectorStore(embeddings=embeddings)"
|
||||
]
|
||||
},
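Building on the default initialization above, a hedged sketch that exercises some of the optional parameters documented earlier; the table and field names are illustrative.

```python
vector_store = SingleStoreVectorStore(
    embedding=embeddings,
    table_name="notebook_docs",  # illustrative table name
    content_field="content",
    metadata_field="metadata",
    pool_size=5,       # active connections in the pool
    max_overflow=10,   # extra connections beyond pool_size
    timeout=30.0,      # connection timeout in seconds
)
```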
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac6071d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Manage vector store\n",
|
||||
"\n",
|
||||
"The `SingleStoreVectorStore` assumes that a Document's ID is an integer. Below are examples of how to manage the vector store.\n",
|
||||
"\n",
|
||||
"### Add items to vector store\n",
|
||||
"\n",
|
||||
"You can add documents to the vector store as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "26e0c6e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain-core"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "17f5efc0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"In the parched desert, a sudden rainstorm brought relief,\n",
|
||||
" as the droplets danced upon the thirsty earth, rejuvenating the landscape\n",
|
||||
" with the sweet scent of petrichor.\"\"\",\n",
|
||||
" metadata={\"category\": \"rain\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"Amidst the bustling cityscape, the rain fell relentlessly,\n",
|
||||
" creating a symphony of pitter-patter on the pavement, while umbrellas\n",
|
||||
" bloomed like colorful flowers in a sea of gray.\"\"\",\n",
|
||||
" metadata={\"category\": \"rain\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"High in the mountains, the rain transformed into a delicate\n",
|
||||
" mist, enveloping the peaks in a mystical veil, where each droplet seemed to\n",
|
||||
" whisper secrets to the ancient rocks below.\"\"\",\n",
|
||||
" metadata={\"category\": \"rain\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"Blanketing the countryside in a soft, pristine layer, the\n",
|
||||
" snowfall painted a serene tableau, muffling the world in a tranquil hush\n",
|
||||
" as delicate flakes settled upon the branches of trees like nature's own \n",
|
||||
" lacework.\"\"\",\n",
|
||||
" metadata={\"category\": \"snow\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"In the urban landscape, snow descended, transforming\n",
|
||||
" bustling streets into a winter wonderland, where the laughter of\n",
|
||||
" children echoed amidst the flurry of snowballs and the twinkle of\n",
|
||||
" holiday lights.\"\"\",\n",
|
||||
" metadata={\"category\": \"snow\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"Atop the rugged peaks, snow fell with an unyielding\n",
|
||||
" intensity, sculpting the landscape into a pristine alpine paradise,\n",
|
||||
" where the frozen crystals shimmered under the moonlight, casting a\n",
|
||||
" spell of enchantment over the wilderness below.\"\"\",\n",
|
||||
" metadata={\"category\": \"snow\"},\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"vector_store.add_documents(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c738c3e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Update items in vector store\n",
|
||||
"\n",
|
||||
"To update an existing document in the vector store, use the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f0aa8b71",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"updated_document = Document(\n",
|
||||
" page_content=\"qux\", metadata={\"source\": \"https://another-example.com\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"vector_store.update_documents(document_id=\"1\", document=updated_document)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dcf1b905",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete items from vector store\n",
|
||||
"\n",
|
||||
"To delete documents from the vector store, use the following code:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ef61e188",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"vector_store.delete(ids=[\"3\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c3620501",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query vector store\n",
|
||||
"\n",
|
||||
"Once your vector store has been created and the relevant documents have been added you will most likely wish to query it during the running of your chain or agent. \n",
|
||||
"\n",
|
||||
"### Query directly\n",
|
||||
"\n",
|
||||
"Performing a simple similarity search can be done as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aa0a16fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"results = vector_store.similarity_search(query=\"trees in the snow\", k=1)\n",
|
||||
"for doc in results:\n",
|
||||
" print(f\"* {doc.page_content} [{doc.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3ed9d733",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to execute a similarity search and receive the corresponding scores you can run:\n",
|
||||
"\n",
|
||||
"- TODO: Edit and then run code cell to generate output"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5efd2eaa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"results = vector_store.similarity_search_with_score(query=\"trees in the snow\", k=1)\n",
|
||||
"for doc, score in results:\n",
|
||||
" print(f\"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fdaae211",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Metadata filtering\n",
|
||||
"SingleStoreDB elevates search capabilities by enabling users to enhance and refine search results through prefiltering based on metadata fields. This functionality empowers developers and data analysts to fine-tune queries, ensuring that search results are precisely tailored to their requirements. By filtering search results using specific metadata attributes, users can narrow down the scope of their queries, focusing only on relevant data subsets. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5c9e9989",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"trees branches\"\n",
|
||||
"docs = vector_store.similarity_search(\n",
|
||||
" query, filter={\"category\": \"snow\"}\n",
|
||||
") # Find documents that correspond to the query and has category \"snow\"\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c14b0bc3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Vector index\n",
|
||||
"Enhance your search efficiency with SingleStore DB version 8.5 or above by leveraging [ANN vector indexes](https://docs.singlestore.com/cloud/reference/sql-reference/vector-functions/vector-indexing/). By setting `use_vector_index=True` during vector store object creation, you can activate this feature. Additionally, if your vectors differ in dimensionality from the default OpenAI embedding size of 1536, ensure to specify the `vector_size` parameter accordingly. "
|
||||
]
|
||||
},
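A minimal sketch of enabling the ANN index, assuming 384-dimensional embeddings; the index name is illustrative, and `vector_size` must match your embedding model.

```python
vector_store = SingleStoreVectorStore(
    embedding=embeddings,
    use_vector_index=True,               # requires SingleStore 8.5+
    vector_index_name="embeddings_idx",  # illustrative name
    vector_size=384,                     # must match the embedding dimensionality
)
```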
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e1f2a202",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Search strategies\n",
|
||||
"SingleStoreDB presents a diverse range of search strategies, each meticulously crafted to cater to specific use cases and user preferences. The default `VECTOR_ONLY` strategy utilizes vector operations such as `dot_product` or `euclidean_distance` to calculate similarity scores directly between vectors, while `TEXT_ONLY` employs Lucene-based full-text search, particularly advantageous for text-centric applications. For users seeking a balanced approach, `FILTER_BY_TEXT` first refines results based on text similarity before conducting vector comparisons, whereas `FILTER_BY_VECTOR` prioritizes vector similarity, filtering results before assessing text similarity for optimal matches. Notably, both `FILTER_BY_TEXT` and `FILTER_BY_VECTOR` necessitate a full-text index for operation. Additionally, `WEIGHTED_SUM` emerges as a sophisticated strategy, calculating the final similarity score by weighing vector and text similarities, albeit exclusively utilizing dot_product distance calculations and also requiring a full-text index. These versatile strategies empower users to fine-tune searches according to their unique needs, facilitating efficient and precise data retrieval and analysis. Moreover, SingleStoreDB's hybrid approaches, exemplified by `FILTER_BY_TEXT`, `FILTER_BY_VECTOR`, and `WEIGHTED_SUM` strategies, seamlessly blend vector and text-based searches to maximize efficiency and accuracy, ensuring users can fully leverage the platform's capabilities for a wide range of applications."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "15093016",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_singlestore.vectorstores import DistanceStrategy\n",
|
||||
"\n",
|
||||
"docsearch = SingleStoreVectorStore.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" distance_strategy=DistanceStrategy.DOT_PRODUCT, # Use dot product for similarity search\n",
|
||||
" use_vector_index=True, # Use vector index for faster search\n",
|
||||
" use_full_text_search=True, # Use full text index\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"vectorResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreVectorStore.SearchStrategy.VECTOR_ONLY,\n",
|
||||
" filter={\"category\": \"rain\"},\n",
|
||||
")\n",
|
||||
"print(vectorResults[0].page_content)\n",
|
||||
"\n",
|
||||
"textResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreVectorStore.SearchStrategy.TEXT_ONLY,\n",
|
||||
")\n",
|
||||
"print(textResults[0].page_content)\n",
|
||||
"\n",
|
||||
"filteredByTextResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreVectorStore.SearchStrategy.FILTER_BY_TEXT,\n",
|
||||
" filter_threshold=0.1,\n",
|
||||
")\n",
|
||||
"print(filteredByTextResults[0].page_content)\n",
|
||||
"\n",
|
||||
"filteredByVectorResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreVectorStore.SearchStrategy.FILTER_BY_VECTOR,\n",
|
||||
" filter_threshold=0.1,\n",
|
||||
")\n",
|
||||
"print(filteredByVectorResults[0].page_content)\n",
|
||||
"\n",
|
||||
"weightedSumResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreVectorStore.SearchStrategy.WEIGHTED_SUM,\n",
|
||||
" text_weight=0.2,\n",
|
||||
" vector_weight=0.8,\n",
|
||||
")\n",
|
||||
"print(weightedSumResults[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0c235cdc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Query by turning into retriever\n",
|
||||
"\n",
|
||||
"You can also transform the vector store into a retriever for easier usage in your chains. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f3460093",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever(search_kwargs={\"k\": 1})\n",
|
||||
"retriever.invoke(\"trees in the snow\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8bf60ab4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal Example: Leveraging CLIP and OpenClip Embeddings\n",
|
||||
"\n",
|
||||
"In the realm of multi-modal data analysis, the integration of diverse information types like images and text has become increasingly crucial. One powerful tool facilitating such integration is [CLIP](https://openai.com/research/clip), a cutting-edge model capable of embedding both images and text into a shared semantic space. By doing so, CLIP enables the retrieval of relevant content across different modalities through similarity search.\n",
|
||||
"\n",
|
||||
"To illustrate, let's consider an application scenario where we aim to effectively analyze multi-modal data. In this example, we harness the capabilities of [OpenClip multimodal embeddings](/docs/integrations/text_embedding/open_clip), which leverage CLIP's framework. With OpenClip, we can seamlessly embed textual descriptions alongside corresponding images, enabling comprehensive analysis and retrieval tasks. Whether it's identifying visually similar images based on textual queries or finding relevant text passages associated with specific visual content, OpenClip empowers users to explore and extract insights from multi-modal data with remarkable efficiency and accuracy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "710f6be9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U langchain openai lanchain-singlestore langchain-experimental"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e26825f1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
|
||||
"from langchain_singlestore.vectorstores import SingleStoreVectorStore\n",
|
||||
"\n",
|
||||
"os.environ[\"SINGLESTOREDB_URL\"] = \"root:pass@localhost:3306/db\"\n",
|
||||
"\n",
|
||||
"TEST_IMAGES_DIR = \"../../modules/images\"\n",
|
||||
"\n",
|
||||
"docsearch = SingleStoreVectorStore(OpenCLIPEmbeddings())\n",
|
||||
"\n",
|
||||
"image_uris = sorted(\n",
|
||||
" [\n",
|
||||
" os.path.join(TEST_IMAGES_DIR, image_name)\n",
|
||||
" for image_name in os.listdir(TEST_IMAGES_DIR)\n",
|
||||
" if image_name.endswith(\".jpg\")\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add images\n",
|
||||
"docsearch.add_images(uris=image_uris)"
|
||||
]
|
||||
},
|
||||
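{
"cell_type": "markdown",
"id": "9f4c6d88",
"metadata": {},
"source": [
"Once the images are embedded, a plain-text query can retrieve the most similar image. This is a sketch that reuses the `docsearch` store populated above; the query string is illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a7e03bd",
"metadata": {},
"outputs": [],
"source": [
"results = docsearch.similarity_search(\"trees covered in snow\", k=1)\n",
"print(results[0].page_content) # content stored for the best-matching image"
]
},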
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c87779e8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage for retrieval-augmented generation\n",
|
||||
"\n",
|
||||
"For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n",
|
||||
"\n",
|
||||
"- [Tutorials](/docs/tutorials/)\n",
|
||||
"- [How-to: Question and answer with RAG](https://python.langchain.com/docs/how_to/#qa-with-rag)\n",
|
||||
"- [Retrieval conceptual docs](https://python.langchain.com/docs/concepts/retrieval)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6f717924",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all SingleStore Document Loader features and configurations head to the github page: [https://github.com/singlestore-labs/langchain-singlestore/](https://github.com/singlestore-labs/langchain-singlestore/)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,326 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b9582dc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# SingleStoreDB\n",
|
||||
">[SingleStoreDB](https://singlestore.com/) is a robust, high-performance distributed SQL database solution designed to excel in both [cloud](https://www.singlestore.com/cloud/) and on-premises environments. Boasting a versatile feature set, it offers seamless deployment options while delivering unparalleled performance.\n",
|
||||
"\n",
|
||||
"A standout feature of SingleStoreDB is its advanced support for vector storage and operations, making it an ideal choice for applications requiring intricate AI capabilities such as text similarity matching. With built-in vector functions like [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), SingleStoreDB empowers developers to implement sophisticated algorithms efficiently.\n",
|
||||
"\n",
|
||||
"For developers keen on leveraging vector data within SingleStoreDB, a comprehensive tutorial is available, guiding them through the intricacies of [working with vector data](https://docs.singlestore.com/managed-service/en/developer-resources/functional-extensions/working-with-vector-data.html). This tutorial delves into the Vector Store within SingleStoreDB, showcasing its ability to facilitate searches based on vector similarity. Leveraging vector indexes, queries can be executed with remarkable speed, enabling swift retrieval of relevant data.\n",
|
||||
"\n",
|
||||
"Moreover, SingleStoreDB's Vector Store seamlessly integrates with [full-text indexing based on Lucene](https://docs.singlestore.com/cloud/developer-resources/functional-extensions/working-with-full-text-search/), enabling powerful text similarity searches. Users can filter search results based on selected fields of document metadata objects, enhancing query precision.\n",
|
||||
"\n",
|
||||
"What sets SingleStoreDB apart is its ability to combine vector and full-text searches in various ways, offering flexibility and versatility. Whether prefiltering by text or vector similarity and selecting the most relevant data, or employing a weighted sum approach to compute a final similarity score, developers have multiple options at their disposal.\n",
|
||||
"\n",
|
||||
"In essence, SingleStoreDB provides a comprehensive solution for managing and querying vector data, offering unparalleled performance and flexibility for AI-driven applications.\n",
|
||||
"\n",
|
||||
"You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e4a61a4d",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Establishing a connection to the database is facilitated through the singlestoredb Python connector.\n",
|
||||
"# Please ensure that this connector is installed in your working environment.\n",
|
||||
"%pip install --upgrade --quiet singlestoredb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "39a0132a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# We want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\n",
|
||||
"if \"OPENAI_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6104fde8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.vectorstores import SingleStoreDB\n",
|
||||
"from langchain_community.vectorstores.utils import DistanceStrategy\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7b45113c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# loading docs\n",
|
||||
"# we will use some artificial data for this example\n",
|
||||
"docs = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"In the parched desert, a sudden rainstorm brought relief,\n",
|
||||
" as the droplets danced upon the thirsty earth, rejuvenating the landscape\n",
|
||||
" with the sweet scent of petrichor.\"\"\",\n",
|
||||
" metadata={\"category\": \"rain\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"Amidst the bustling cityscape, the rain fell relentlessly,\n",
|
||||
" creating a symphony of pitter-patter on the pavement, while umbrellas\n",
|
||||
" bloomed like colorful flowers in a sea of gray.\"\"\",\n",
|
||||
" metadata={\"category\": \"rain\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"High in the mountains, the rain transformed into a delicate\n",
|
||||
" mist, enveloping the peaks in a mystical veil, where each droplet seemed to\n",
|
||||
" whisper secrets to the ancient rocks below.\"\"\",\n",
|
||||
" metadata={\"category\": \"rain\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"Blanketing the countryside in a soft, pristine layer, the\n",
|
||||
" snowfall painted a serene tableau, muffling the world in a tranquil hush\n",
|
||||
" as delicate flakes settled upon the branches of trees like nature's own \n",
|
||||
" lacework.\"\"\",\n",
|
||||
" metadata={\"category\": \"snow\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"In the urban landscape, snow descended, transforming\n",
|
||||
" bustling streets into a winter wonderland, where the laughter of\n",
|
||||
" children echoed amidst the flurry of snowballs and the twinkle of\n",
|
||||
" holiday lights.\"\"\",\n",
|
||||
" metadata={\"category\": \"snow\"},\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"\"\"Atop the rugged peaks, snow fell with an unyielding\n",
|
||||
" intensity, sculpting the landscape into a pristine alpine paradise,\n",
|
||||
" where the frozen crystals shimmered under the moonlight, casting a\n",
|
||||
" spell of enchantment over the wilderness below.\"\"\",\n",
|
||||
" metadata={\"category\": \"snow\"},\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "535b2687",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"There are several ways to establish a [connection](https://singlestoredb-python.labs.singlestore.com/generated/singlestoredb.connect.html) to the database. You can either set up environment variables or pass named parameters to the `SingleStoreDB constructor`. Alternatively, you may provide these parameters to the `from_documents` and `from_texts` methods."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d0b316bf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setup connection url as environment variable\n",
|
||||
"os.environ[\"SINGLESTOREDB_URL\"] = \"root:pass@localhost:3306/db\"\n",
|
||||
"\n",
|
||||
"# Load documents to the store\n",
|
||||
"docsearch = SingleStoreDB.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" table_name=\"notebook\", # use table with a custom name\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0eaa4297",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"trees in the snow\"\n",
|
||||
"docs = docsearch.similarity_search(query) # Find documents that correspond to the query\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "51b2b552",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"SingleStoreDB elevates search capabilities by enabling users to enhance and refine search results through prefiltering based on metadata fields. This functionality empowers developers and data analysts to fine-tune queries, ensuring that search results are precisely tailored to their requirements. By filtering search results using specific metadata attributes, users can narrow down the scope of their queries, focusing only on relevant data subsets. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "389bf801",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"query = \"trees branches\"\n",
|
||||
"docs = docsearch.similarity_search(\n",
|
||||
" query, filter={\"category\": \"snow\"}\n",
|
||||
") # Find documents that correspond to the query and has category \"snow\"\n",
|
||||
"print(docs[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "035cba66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Enhance your search efficiency with SingleStore DB version 8.5 or above by leveraging [ANN vector indexes](https://docs.singlestore.com/cloud/reference/sql-reference/vector-functions/vector-indexing/). By setting `use_vector_index=True` during vector store object creation, you can activate this feature. Additionally, if your vectors differ in dimensionality from the default OpenAI embedding size of 1536, ensure to specify the `vector_size` parameter accordingly. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5308afe5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"SingleStoreDB presents a diverse range of search strategies, each meticulously crafted to cater to specific use cases and user preferences. The default `VECTOR_ONLY` strategy utilizes vector operations such as `dot_product` or `euclidean_distance` to calculate similarity scores directly between vectors, while `TEXT_ONLY` employs Lucene-based full-text search, particularly advantageous for text-centric applications. For users seeking a balanced approach, `FILTER_BY_TEXT` first refines results based on text similarity before conducting vector comparisons, whereas `FILTER_BY_VECTOR` prioritizes vector similarity, filtering results before assessing text similarity for optimal matches. Notably, both `FILTER_BY_TEXT` and `FILTER_BY_VECTOR` necessitate a full-text index for operation. Additionally, `WEIGHTED_SUM` emerges as a sophisticated strategy, calculating the final similarity score by weighing vector and text similarities, albeit exclusively utilizing dot_product distance calculations and also requiring a full-text index. These versatile strategies empower users to fine-tune searches according to their unique needs, facilitating efficient and precise data retrieval and analysis. Moreover, SingleStoreDB's hybrid approaches, exemplified by `FILTER_BY_TEXT`, `FILTER_BY_VECTOR`, and `WEIGHTED_SUM` strategies, seamlessly blend vector and text-based searches to maximize efficiency and accuracy, ensuring users can fully leverage the platform's capabilities for a wide range of applications."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "17db0116",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docsearch = SingleStoreDB.from_documents(\n",
|
||||
" docs,\n",
|
||||
" embeddings,\n",
|
||||
" distance_strategy=DistanceStrategy.DOT_PRODUCT, # Use dot product for similarity search\n",
|
||||
" use_vector_index=True, # Use vector index for faster search\n",
|
||||
" use_full_text_search=True, # Use full text index\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"vectorResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreDB.SearchStrategy.VECTOR_ONLY,\n",
|
||||
" filter={\"category\": \"rain\"},\n",
|
||||
")\n",
|
||||
"print(vectorResults[0].page_content)\n",
|
||||
"\n",
|
||||
"textResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreDB.SearchStrategy.TEXT_ONLY,\n",
|
||||
")\n",
|
||||
"print(textResults[0].page_content)\n",
|
||||
"\n",
|
||||
"filteredByTextResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreDB.SearchStrategy.FILTER_BY_TEXT,\n",
|
||||
" filter_threshold=0.1,\n",
|
||||
")\n",
|
||||
"print(filteredByTextResults[0].page_content)\n",
|
||||
"\n",
|
||||
"filteredByVectorResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreDB.SearchStrategy.FILTER_BY_VECTOR,\n",
|
||||
" filter_threshold=0.1,\n",
|
||||
")\n",
|
||||
"print(filteredByVectorResults[0].page_content)\n",
|
||||
"\n",
|
||||
"weightedSumResults = docsearch.similarity_search(\n",
|
||||
" \"rainstorm in parched desert, rain\",\n",
|
||||
" k=1,\n",
|
||||
" search_strategy=SingleStoreDB.SearchStrategy.WEIGHTED_SUM,\n",
|
||||
" text_weight=0.2,\n",
|
||||
" vector_weight=0.8,\n",
|
||||
")\n",
|
||||
"print(weightedSumResults[0].page_content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "86efff90",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-modal Example: Leveraging CLIP and OpenClip Embeddings\n",
|
||||
"\n",
|
||||
"In the realm of multi-modal data analysis, the integration of diverse information types like images and text has become increasingly crucial. One powerful tool facilitating such integration is [CLIP](https://openai.com/research/clip), a cutting-edge model capable of embedding both images and text into a shared semantic space. By doing so, CLIP enables the retrieval of relevant content across different modalities through similarity search.\n",
|
||||
"\n",
|
||||
"To illustrate, let's consider an application scenario where we aim to effectively analyze multi-modal data. In this example, we harness the capabilities of [OpenClip multimodal embeddings](/docs/integrations/text_embedding/open_clip), which leverage CLIP's framework. With OpenClip, we can seamlessly embed textual descriptions alongside corresponding images, enabling comprehensive analysis and retrieval tasks. Whether it's identifying visually similar images based on textual queries or finding relevant text passages associated with specific visual content, OpenClip empowers users to explore and extract insights from multi-modal data with remarkable efficiency and accuracy."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9c0bce88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -U langchain openai singlestoredb langchain-experimental # (newest versions required for multi-modal)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "21a8c25c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import SingleStoreDB\n",
|
||||
"from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
|
||||
"\n",
|
||||
"os.environ[\"SINGLESTOREDB_URL\"] = \"root:pass@localhost:3306/db\"\n",
|
||||
"\n",
|
||||
"TEST_IMAGES_DIR = \"../../modules/images\"\n",
|
||||
"\n",
|
||||
"docsearch = SingleStoreDB(OpenCLIPEmbeddings())\n",
|
||||
"\n",
|
||||
"image_uris = sorted(\n",
|
||||
" [\n",
|
||||
" os.path.join(TEST_IMAGES_DIR, image_name)\n",
|
||||
" for image_name in os.listdir(TEST_IMAGES_DIR)\n",
|
||||
" if image_name.endswith(\".jpg\")\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Add images\n",
|
||||
"docsearch.add_images(uris=image_uris)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
491
docs/docs/integrations/vectorstores/ydb.ipynb
Normal file
@@ -0,0 +1,491 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# YDB\n",
|
||||
"\n",
|
||||
"> [YDB](https://ydb.tech/) is a versatile open source Distributed SQL Database that combines high availability and scalability with strong consistency and ACID transactions. It accommodates transactional (OLTP), analytical (OLAP), and streaming workloads simultaneously.\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the `YDB` vector store.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, set up a local YDB with Docker:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "da75f17c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! docker run -d -p 2136:2136 --name ydb-langchain -e YDB_USE_IN_MEMORY_PDISKS=true -h localhost ydbplatform/local-ydb:trunk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0acb2a8d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You'll need to install `langchain-ydb` to use this integration"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d454fb7c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install -qU langchain-ydb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3df5501b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Credentials\n",
|
||||
"\n",
|
||||
"There are no credentials for this notebook, just make sure you have installed the packages as shown above."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "54d5276f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you want to get best in-class automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f6fd5b03",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
|
||||
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b87fe34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initialization\n",
|
||||
"\n",
|
||||
"import EmbeddingTabs from \"@theme/EmbeddingTabs\";\n",
|
||||
"\n",
|
||||
"<EmbeddingTabs/>\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "60276097",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/Users/ovcharuk/opensource/langchain/.venv/lib/python3.13/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings(model=\"text-embedding-3-large\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aac9563e",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-06-03T08:33:31.554934Z",
|
||||
"start_time": "2023-06-03T08:33:31.549590Z"
|
||||
},
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ydb.vectorstores import YDB, YDBSearchStrategy, YDBSettings\n",
|
||||
"\n",
|
||||
"settings = YDBSettings(\n",
|
||||
" table=\"ydb_example\",\n",
|
||||
" strategy=YDBSearchStrategy.COSINE_SIMILARITY,\n",
|
||||
")\n",
|
||||
"vector_store = YDB(embeddings, config=settings)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "32dd3f67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Manage vector store\n",
|
||||
"\n",
|
||||
"Once you have created your vector store, you can interact with it by adding and deleting different items.\n",
|
||||
"\n",
|
||||
"### Add items to vector store\n",
|
||||
"\n",
|
||||
"Prepare documents to work with:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "944743ee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from uuid import uuid4\n",
|
||||
"\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"document_1 = Document(\n",
|
||||
" page_content=\"I had chocalate chip pancakes and scrambled eggs for breakfast this morning.\",\n",
|
||||
" metadata={\"source\": \"tweet\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_2 = Document(\n",
|
||||
" page_content=\"The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees.\",\n",
|
||||
" metadata={\"source\": \"news\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_3 = Document(\n",
|
||||
" page_content=\"Building an exciting new project with LangChain - come check it out!\",\n",
|
||||
" metadata={\"source\": \"tweet\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_4 = Document(\n",
|
||||
" page_content=\"Robbers broke into the city bank and stole $1 million in cash.\",\n",
|
||||
" metadata={\"source\": \"news\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_5 = Document(\n",
|
||||
" page_content=\"Wow! That was an amazing movie. I can't wait to see it again.\",\n",
|
||||
" metadata={\"source\": \"tweet\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_6 = Document(\n",
|
||||
" page_content=\"Is the new iPhone worth the price? Read this review to find out.\",\n",
|
||||
" metadata={\"source\": \"website\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_7 = Document(\n",
|
||||
" page_content=\"The top 10 soccer players in the world right now.\",\n",
|
||||
" metadata={\"source\": \"website\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_8 = Document(\n",
|
||||
" page_content=\"LangGraph is the best framework for building stateful, agentic applications!\",\n",
|
||||
" metadata={\"source\": \"tweet\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_9 = Document(\n",
|
||||
" page_content=\"The stock market is down 500 points today due to fears of a recession.\",\n",
|
||||
" metadata={\"source\": \"news\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"document_10 = Document(\n",
|
||||
" page_content=\"I have a bad feeling I am going to get deleted :(\",\n",
|
||||
" metadata={\"source\": \"tweet\"},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"documents = [\n",
|
||||
" document_1,\n",
|
||||
" document_2,\n",
|
||||
" document_3,\n",
|
||||
" document_4,\n",
|
||||
" document_5,\n",
|
||||
" document_6,\n",
|
||||
" document_7,\n",
|
||||
" document_8,\n",
|
||||
" document_9,\n",
|
||||
" document_10,\n",
|
||||
"]\n",
|
||||
"uuids = [str(uuid4()) for _ in range(len(documents))]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c616b7f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can add items to your vector store by using the `add_documents` function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "3f632996",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Inserting data...: 100%|██████████| 10/10 [00:00<00:00, 14.67it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['947be6aa-d489-44c5-910e-62e4d58d2ffb',\n",
|
||||
" '7a62904d-9db3-412b-83b6-f01b34dd7de3',\n",
|
||||
" 'e5a49c64-c985-4ed7-ac58-5ffa31ade699',\n",
|
||||
" '99cf4104-36ab-4bd5-b0da-e210d260e512',\n",
|
||||
" '5810bcd0-b46e-443e-a663-e888c9e028d1',\n",
|
||||
" '190c193d-844e-4dbb-9a4b-b8f5f16cfae6',\n",
|
||||
" 'f8912944-f80a-4178-954e-4595bf59e341',\n",
|
||||
" '34fc7b09-6000-42c9-95f7-7d49f430b904',\n",
|
||||
" '0f6b6783-f300-4a4d-bb04-8025c4dfd409',\n",
|
||||
" '46c37ba9-7cf2-4ac8-9bd1-d84e2cb1155c']"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vector_store.add_documents(documents=documents, ids=uuids)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18af81cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete items from vector store\n",
|
||||
"\n",
|
||||
"You can delete items from your vector store by ID using the `delete` function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "12b32762",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"True"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"vector_store.delete(ids=[uuids[-1]])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ada27577",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query vector store\n",
|
||||
"\n",
|
||||
"Once your vector store has been created and relevant documents have been added, you will likely want to query it during the execution of your chain or agent.\n",
|
||||
"\n",
|
||||
"### Query directly\n",
|
||||
"\n",
|
||||
"#### Similarity search\n",
|
||||
"\n",
|
||||
"A simple similarity search can be performed as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "015831a3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"* Building an exciting new project with LangChain - come check it out! [{'source': 'tweet'}]\n",
|
||||
"* LangGraph is the best framework for building stateful, agentic applications! [{'source': 'tweet'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = vector_store.similarity_search(\n",
|
||||
" \"LangChain provides abstractions to make working with LLMs easy\", k=2\n",
|
||||
")\n",
|
||||
"for res in results:\n",
|
||||
" print(f\"* {res.page_content} [{res.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "623d3b9d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Similarity search with score\n",
|
||||
"\n",
|
||||
"You can also perform a search with a score:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "e7d43430",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"* [SIM=0.595] The weather forecast for tomorrow is cloudy and overcast, with a high of 62 degrees. [{'source': 'news'}]\n",
|
||||
"* [SIM=0.212] I had chocalate chip pancakes and scrambled eggs for breakfast this morning. [{'source': 'tweet'}]\n",
|
||||
"* [SIM=0.118] Wow! That was an amazing movie. I can't wait to see it again. [{'source': 'tweet'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = vector_store.similarity_search_with_score(\"Will it be hot tomorrow?\", k=3)\n",
|
||||
"for res, score in results:\n",
|
||||
" print(f\"* [SIM={score:.3f}] {res.page_content} [{res.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f5a90c12",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Filtering\n",
|
||||
"\n",
|
||||
"You can search with filters as described below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "169d01d1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"* I had chocalate chip pancakes and scrambled eggs for breakfast this morning. [{'source': 'tweet'}]\n",
|
||||
"* Wow! That was an amazing movie. I can't wait to see it again. [{'source': 'tweet'}]\n",
|
||||
"* Building an exciting new project with LangChain - come check it out! [{'source': 'tweet'}]\n",
|
||||
"* LangGraph is the best framework for building stateful, agentic applications! [{'source': 'tweet'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = vector_store.similarity_search_with_score(\n",
|
||||
" \"What did I eat for breakfast?\",\n",
|
||||
" k=4,\n",
|
||||
" filter={\"source\": \"tweet\"},\n",
|
||||
")\n",
|
||||
"for res, _ in results:\n",
|
||||
" print(f\"* {res.page_content} [{res.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "afacfd4e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Query by turning into retriever\n",
|
||||
"\n",
|
||||
"You can also transform the vector store into a retriever for easier usage in your chains.\n",
|
||||
"\n",
|
||||
"Here's how to transform your vector store into a retriever and then invoke the retriever with a simple query and filter."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "97187188",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"* Robbers broke into the city bank and stole $1 million in cash. [{'source': 'news'}]\n",
|
||||
"* The stock market is down 500 points today due to fears of a recession. [{'source': 'news'}]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"retriever = vector_store.as_retriever(\n",
|
||||
" search_kwargs={\"k\": 2},\n",
|
||||
")\n",
|
||||
"results = retriever.invoke(\n",
|
||||
" \"Stealing from the bank is a crime\", filter={\"source\": \"news\"}\n",
|
||||
")\n",
|
||||
"for res in results:\n",
|
||||
" print(f\"* {res.page_content} [{res.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "57fade30",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage for retrieval-augmented generation\n",
|
||||
"\n",
|
||||
"For guides on how to use this vector store for retrieval-augmented generation (RAG), see the following sections:\n",
|
||||
"\n",
|
||||
"- [Tutorials](/docs/tutorials/)\n",
|
||||
"- [How-to: Question and answer with RAG](https://python.langchain.com/docs/how_to/#qa-with-rag)\n",
|
||||
"- [Retrieval conceptual docs](https://python.langchain.com/docs/concepts/retrieval)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "02452d34",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## API reference\n",
|
||||
"\n",
|
||||
"For detailed documentation of all `YDB` features and configurations head to the API reference:https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.ydb.YDB.html"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.13.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -413,7 +413,7 @@
|
||||
"Yellowbrick also supports indexing using the Locality-Sensitive Hashing approach. This is an approximate nearest-neighbor search technique, and allows one to trade off similarity search time at the expense of accuracy. The index introduces two new tunable parameters:\n",
|
||||
"\n",
|
||||
"- The number of hyperplanes, which is provided as an argument to `create_lsh_index(num_hyperplanes)`. The more documents, the more hyperplanes are needed. LSH is a form of dimensionality reduction. The original embeddings are transformed into lower dimensional vectors where the number of components is the same as the number of hyperplanes.\n",
|
||||
"- The Hamming distance, an integer representing the breadth of the search. Smaller Hamming distances result in faster retreival but lower accuracy.\n",
|
||||
"- The Hamming distance, an integer representing the breadth of the search. Smaller Hamming distances result in faster retrieval but lower accuracy.\n",
|
||||
"\n",
|
||||
"Here's how you can create an index on the embeddings we loaded into Yellowbrick. We'll also re-run the previous chat session, but this time the retrieval will use the index. Note that for such a small number of documents, you won't see the benefit of indexing in terms of performance."
|
||||
]
|
||||
|
||||
@@ -38,7 +38,7 @@ LangSmith allows you to closely trace, monitor and evaluate your LLM application
|
||||
It seamlessly integrates with LangChain, and you can use it to inspect and debug individual steps of your chains as you build.
|
||||
|
||||
LangSmith documentation is hosted on a separate site.
|
||||
You can peruse [LangSmith tutorials here](https://docs.smith.langchain.com/tutorials/).
|
||||
You can peruse [LangSmith tutorials here](https://docs.smith.langchain.com/).
|
||||
|
||||
### Evaluation
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
"- Map a process to the set of documents, where the process includes generating a score;\n",
|
||||
"- Rank the results by score and return the maximum.\n",
|
||||
"\n",
|
||||
"A common process in this scenario is question-answering using pieces of context from a document. Forcing the model to generate score along with its answer helps to select for answers generated only by relevant context.\n",
|
||||
"A common process in this scenario is question-answering using pieces of context from a document. Forcing the model to generate a score along with its answer helps to select for answers generated only by relevant context.\n",
|
||||
"\n",
|
||||
"An [LangGraph](https://langchain-ai.github.io/langgraph/) implementation allows for the incorporation of [tool calling](/docs/concepts/tool_calling) and other features for this problem. Below we will go through both `MapRerankDocumentsChain` and a corresponding LangGraph implementation on a simple example for illustrative purposes."
|
||||
]
|
||||
|
||||
@@ -140,6 +140,17 @@ def get_vectorstore_table():
|
||||
"Local/Cloud": "Local",
|
||||
"IDs in add Documents": True,
|
||||
},
|
||||
"openGauss": {
|
||||
"Delete by ID": True,
|
||||
"Filtering": True,
|
||||
"similarity_search_by_vector": True,
|
||||
"similarity_search_with_score": True,
|
||||
"asearch": False,
|
||||
"Passes Standard Tests": True,
|
||||
"Multi Tenancy": False,
|
||||
"Local/Cloud": "Local",
|
||||
"IDs in add Documents": True,
|
||||
},
|
||||
"QdrantVectorStore": {
|
||||
"Delete by ID": True,
|
||||
"Filtering": True,
|
||||
|
||||
@@ -1130,6 +1130,19 @@ const FEATURE_TABLES = {
|
||||
local: true,
|
||||
idsInAddDocuments: false,
|
||||
},
|
||||
{
|
||||
name: "openGauss",
|
||||
link: "openGauss",
|
||||
deleteById: true,
|
||||
filtering: true,
|
||||
searchByVector: true,
|
||||
searchWithScore: true,
|
||||
async: false,
|
||||
passesStandardTests: true,
|
||||
multiTenancy: false,
|
||||
local: true,
|
||||
idsInAddDocuments: true,
|
||||
},
|
||||
{
|
||||
name: "PGVector",
|
||||
link: "pgvector",
|
||||
|
||||
@@ -134,6 +134,18 @@
|
||||
"source": "/docs/integrations/retrievers/weaviate-hybrid(/?)",
|
||||
"destination": "/docs/integrations/vectorstores/weaviate/#search-mechanism"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/vectorstores/singlestoredb(/?)",
|
||||
"destination": "https://python.langchain.com/v0.2/docs/integrations/vectorstores/singlestoredb/"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/providers/singlestoredb(/?)",
|
||||
"destination": "/docs/integrations/providers/singlestore/"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/retrievers/singlestoredb(/?)",
|
||||
"destination": "https://python.langchain.com/v0.2/docs/integrations/retrievers/singlestoredb/"
|
||||
},
|
||||
{
|
||||
"source": "/api_reference/mongodb/:path(.*/?)*",
|
||||
"destination": "https://langchain-mongodb.readthedocs.io/en/latest/langchain_mongodb/api_docs.html"
|
||||
|
||||
@@ -196,7 +196,7 @@ def create_sql_agent(
|
||||
]
|
||||
prompt = ChatPromptTemplate.from_messages(messages)
|
||||
agent = RunnableAgent(
|
||||
runnable=create_openai_functions_agent(llm, tools, prompt), # type: ignore
|
||||
runnable=create_openai_functions_agent(llm, tools, prompt), # type: ignore[arg-type]
|
||||
input_keys_arg=["input"],
|
||||
return_keys_arg=["output"],
|
||||
**kwargs,
|
||||
@@ -211,9 +211,9 @@ def create_sql_agent(
|
||||
]
|
||||
prompt = ChatPromptTemplate.from_messages(messages)
|
||||
if agent_type == "openai-tools":
|
||||
runnable = create_openai_tools_agent(llm, tools, prompt) # type: ignore
|
||||
runnable = create_openai_tools_agent(llm, tools, prompt) # type: ignore[arg-type]
|
||||
else:
|
||||
runnable = create_tool_calling_agent(llm, tools, prompt) # type: ignore
|
||||
runnable = create_tool_calling_agent(llm, tools, prompt) # type: ignore[arg-type]
|
||||
agent = RunnableMultiActionAgent( # type: ignore[assignment]
|
||||
runnable=runnable,
|
||||
input_keys_arg=["input"],
|
||||
|
||||
@@ -135,7 +135,7 @@ def _get_assistants_tool(
|
||||
Dict[str, Any]: A dictionary of tools that are converted into OpenAI tools.
|
||||
"""
|
||||
if _is_assistants_builtin_tool(tool):
|
||||
return tool # type: ignore
|
||||
return tool # type: ignore[return-value]
|
||||
else:
|
||||
return convert_to_openai_tool(tool)
|
||||
|
||||
@@ -288,7 +288,7 @@ class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable):
|
||||
assistant = client.beta.assistants.create(
|
||||
name=name,
|
||||
instructions=instructions,
|
||||
tools=[_get_assistants_tool(tool) for tool in tools], # type: ignore
|
||||
tools=[_get_assistants_tool(tool) for tool in tools],
|
||||
tool_resources=tool_resources, # type: ignore[arg-type]
|
||||
model=model,
|
||||
extra_body=extra_body,
|
||||
@@ -430,7 +430,7 @@ class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable):
|
||||
assistant = await async_client.beta.assistants.create(
|
||||
name=name,
|
||||
instructions=instructions,
|
||||
tools=openai_tools, # type: ignore
|
||||
tools=openai_tools,
|
||||
tool_resources=tool_resources, # type: ignore[arg-type]
|
||||
model=model,
|
||||
)
|
||||
|
||||
@@ -238,7 +238,7 @@ class InMemoryCache(BaseCache):
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
class FullLLMCache(Base): # type: ignore
|
||||
class FullLLMCache(Base): # type: ignore[misc,valid-type]
|
||||
"""SQLite table for full LLM Cache (all generations)."""
|
||||
|
||||
__tablename__ = "full_llm_cache"
|
||||
@@ -261,7 +261,7 @@ class SQLAlchemyCache(BaseCache):
|
||||
"""Look up based on prompt and llm_string."""
|
||||
stmt = (
|
||||
select(self.cache_schema.response)
|
||||
.where(self.cache_schema.prompt == prompt) # type: ignore
|
||||
.where(self.cache_schema.prompt == prompt)
|
||||
.where(self.cache_schema.llm == llm_string)
|
||||
.order_by(self.cache_schema.idx)
|
||||
)
|
||||
@@ -1348,17 +1348,14 @@ class CassandraSemanticCache(BaseCache):
|
||||
return await self.embedding.aembed_query(text=text)
|
||||
|
||||
self._aget_embedding = _acache_embedding
|
||||
|
||||
kwargs = {}
|
||||
embedding_dimension: Union[int, Awaitable[int], None] = None
|
||||
if setup_mode == CassandraSetupMode.ASYNC:
|
||||
embedding_dimension = self._aget_embedding_dimension()
|
||||
kwargs["async_setup"] = True
|
||||
elif setup_mode == CassandraSetupMode.SYNC:
|
||||
embedding_dimension = self._get_embedding_dimension()
|
||||
|
||||
kwargs = {}
|
||||
if setup_mode == CassandraSetupMode.ASYNC:
|
||||
kwargs["async_setup"] = True
|
||||
|
||||
self.table = MetadataVectorCassandraTable(
|
||||
session=self.session,
|
||||
keyspace=self.keyspace,
|
||||
@@ -1534,7 +1531,7 @@ class CassandraSemanticCache(BaseCache):
|
||||
await self.table.aclear()
|
||||
|
||||
|
||||
class FullMd5LLMCache(Base): # type: ignore
|
||||
class FullMd5LLMCache(Base): # type: ignore[misc,valid-type]
|
||||
"""SQLite table for full LLM Cache (all generations)."""
|
||||
|
||||
__tablename__ = "full_md5_llm_cache"
|
||||
@@ -1586,7 +1583,7 @@ class SQLAlchemyMd5Cache(BaseCache):
|
||||
def _delete_previous(self, session: Session, prompt: str, llm_string: str) -> None:
|
||||
stmt = (
|
||||
delete(self.cache_schema)
|
||||
.where(self.cache_schema.prompt_md5 == self.get_md5(prompt)) # type: ignore
|
||||
.where(self.cache_schema.prompt_md5 == self.get_md5(prompt))
|
||||
.where(self.cache_schema.llm == llm_string)
|
||||
.where(self.cache_schema.prompt == prompt)
|
||||
)
|
||||
@@ -1596,7 +1593,7 @@ class SQLAlchemyMd5Cache(BaseCache):
|
||||
prompt_pd5 = self.get_md5(prompt)
|
||||
stmt = (
|
||||
select(self.cache_schema.response)
|
||||
.where(self.cache_schema.prompt_md5 == prompt_pd5) # type: ignore
|
||||
.where(self.cache_schema.prompt_md5 == prompt_pd5)
|
||||
.where(self.cache_schema.llm == llm_string)
|
||||
.where(self.cache_schema.prompt == prompt)
|
||||
.order_by(self.cache_schema.idx)
|
||||
@@ -1799,7 +1796,7 @@ class _CachedAwaitable:
|
||||
def __await__(self) -> Generator:
|
||||
if self.result is _unset:
|
||||
self.result = yield from self.awaitable.__await__()
|
||||
return self.result # type: ignore
|
||||
return self.result # type: ignore[return-value]
|
||||
|
||||
|
||||
def _reawaitable(func: Callable) -> Callable:
|
||||
@@ -2492,6 +2489,18 @@ class OpenSearchSemanticCache(BaseCache):
|
||||
del self._cache_dict[index_name]
|
||||
|
||||
|
||||
@deprecated(
|
||||
since="0.3.22",
|
||||
message=(
|
||||
"This class is pending deprecation and may be removed in a future version. "
|
||||
"You can swap to using the `SingleStoreSemanticCache` "
|
||||
"implementation in `langchain_singlestore`. "
|
||||
"See <https://github.com/singlestore-labs/langchain-singlestore> for details "
|
||||
" about the new implementation."
|
||||
),
|
||||
alternative="from langchain_singlestore import SingleStoreSemanticCache",
|
||||
pending=True,
|
||||
)
|
||||
class SingleStoreDBSemanticCache(BaseCache):
|
||||
"""Cache that uses SingleStore DB as a backend"""
|
||||
|
||||
|
||||
@@ -584,7 +584,7 @@ class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
|
||||
)
|
||||
_custom_metrics = custom_metrics if custom_metrics else self.custom_metrics
|
||||
|
||||
self.__init__( # type: ignore
|
||||
self.__init__( # type: ignore[misc]
|
||||
task_type=_task_type,
|
||||
workspace=_workspace,
|
||||
project_name=_project_name,
|
||||
|
||||
@@ -10,6 +10,38 @@ from langchain_core.messages import AIMessage
|
||||
from langchain_core.outputs import ChatGeneration, LLMResult
|
||||
|
||||
MODEL_COST_PER_1K_TOKENS = {
|
||||
# GPT-4.1 input
|
||||
"gpt-4.1": 0.002,
|
||||
"gpt-4.1-2025-04-14": 0.002,
|
||||
"gpt-4.1-cached": 0.0005,
|
||||
"gpt-4.1-2025-04-14-cached": 0.0005,
|
||||
# GPT-4.1 output
|
||||
"gpt-4.1-completion": 0.008,
|
||||
"gpt-4.1-2025-04-14-completion": 0.008,
|
||||
# GPT-4.1-mini input
|
||||
"gpt-4.1-mini": 0.0004,
|
||||
"gpt-4.1-mini-2025-04-14": 0.0004,
|
||||
"gpt-4.1-mini-cached": 0.0001,
|
||||
"gpt-4.1-mini-2025-04-14-cached": 0.0001,
|
||||
# GPT-4.1-mini output
|
||||
"gpt-4.1-mini-completion": 0.0016,
|
||||
"gpt-4.1-mini-2025-04-14-completion": 0.0016,
|
||||
# GPT-4.1-nano input
|
||||
"gpt-4.1-nano": 0.0001,
|
||||
"gpt-4.1-nano-2025-04-14": 0.0001,
|
||||
"gpt-4.1-nano-cached": 0.000025,
|
||||
"gpt-4.1-nano-2025-04-14-cached": 0.000025,
|
||||
# GPT-4.1-nano output
|
||||
"gpt-4.1-nano-completion": 0.0004,
|
||||
"gpt-4.1-nano-2025-04-14-completion": 0.0004,
|
||||
# GPT-4.5-preview input
|
||||
"gpt-4.5-preview": 0.075,
|
||||
"gpt-4.5-preview-2025-02-27": 0.075,
|
||||
"gpt-4.5-preview-cached": 0.0375,
|
||||
"gpt-4.5-preview-2025-02-27-cached": 0.0375,
|
||||
# GPT-4.5-preview output
|
||||
"gpt-4.5-preview-completion": 0.15,
|
||||
"gpt-4.5-preview-2025-02-27-completion": 0.15,
|
||||
# OpenAI o1 input
|
||||
"o1": 0.015,
|
||||
"o1-2024-12-17": 0.015,
|
||||
@@ -18,6 +50,28 @@ MODEL_COST_PER_1K_TOKENS = {
|
||||
# OpenAI o1 output
|
||||
"o1-completion": 0.06,
|
||||
"o1-2024-12-17-completion": 0.06,
|
||||
# OpenAI o1-pro input
|
||||
"o1-pro": 0.15,
|
||||
"o1-pro-2025-03-19": 0.15,
|
||||
# OpenAI o1-pro output
|
||||
"o1-pro-completion": 0.6,
|
||||
"o1-pro-2025-03-19-completion": 0.6,
|
||||
# OpenAI o3 input
|
||||
"o3": 0.01,
|
||||
"o3-2025-04-16": 0.01,
|
||||
"o3-cached": 0.0025,
|
||||
"o3-2025-04-16-cached": 0.0025,
|
||||
# OpenAI o3 output
|
||||
"o3-completion": 0.04,
|
||||
"o3-2025-04-16-completion": 0.04,
|
||||
# OpenAI o4-mini input
|
||||
"o4-mini": 0.0011,
|
||||
"o4-mini-2025-04-16": 0.0011,
|
||||
"o4-mini-cached": 0.000275,
|
||||
"o4-mini-2025-04-16-cached": 0.000275,
|
||||
# OpenAI o4-mini output
|
||||
"o4-mini-completion": 0.0044,
|
||||
"o4-mini-2025-04-16-completion": 0.0044,
|
||||
# OpenAI o3-mini input
|
||||
"o3-mini": 0.0011,
|
||||
"o3-mini-2025-01-31": 0.0011,
|
||||
@@ -26,6 +80,14 @@ MODEL_COST_PER_1K_TOKENS = {
|
||||
# OpenAI o3-mini output
|
||||
"o3-mini-completion": 0.0044,
|
||||
"o3-mini-2025-01-31-completion": 0.0044,
|
||||
# OpenAI o1-mini input (updated pricing)
|
||||
"o1-mini": 0.0011,
|
||||
"o1-mini-cached": 0.00055,
|
||||
"o1-mini-2024-09-12": 0.0011,
|
||||
"o1-mini-2024-09-12-cached": 0.00055,
|
||||
+    # OpenAI o1-mini output (updated pricing)
+    "o1-mini-completion": 0.0044,
+    "o1-mini-2024-09-12-completion": 0.0044,
     # OpenAI o1-preview input
     "o1-preview": 0.015,
     "o1-preview-cached": 0.0075,
@@ -34,22 +96,6 @@ MODEL_COST_PER_1K_TOKENS = {
     # OpenAI o1-preview output
     "o1-preview-completion": 0.06,
     "o1-preview-2024-09-12-completion": 0.06,
-    # OpenAI o1-mini input
-    "o1-mini": 0.003,
-    "o1-mini-cached": 0.0015,
-    "o1-mini-2024-09-12": 0.003,
-    "o1-mini-2024-09-12-cached": 0.0015,
-    # OpenAI o1-mini output
-    "o1-mini-completion": 0.012,
-    "o1-mini-2024-09-12-completion": 0.012,
-    # GPT-4o-mini input
-    "gpt-4o-mini": 0.00015,
-    "gpt-4o-mini-cached": 0.000075,
-    "gpt-4o-mini-2024-07-18": 0.00015,
-    "gpt-4o-mini-2024-07-18-cached": 0.000075,
-    # GPT-4o-mini output
-    "gpt-4o-mini-completion": 0.0006,
-    "gpt-4o-mini-2024-07-18-completion": 0.0006,
     # GPT-4o input
     "gpt-4o": 0.0025,
     "gpt-4o-cached": 0.00125,
@@ -63,6 +109,65 @@ MODEL_COST_PER_1K_TOKENS = {
     "gpt-4o-2024-05-13-completion": 0.015,
     "gpt-4o-2024-08-06-completion": 0.01,
     "gpt-4o-2024-11-20-completion": 0.01,
+    # GPT-4o-audio-preview input
+    "gpt-4o-audio-preview": 0.0025,
+    "gpt-4o-audio-preview-2024-12-17": 0.0025,
+    "gpt-4o-audio-preview-2024-10-01": 0.0025,
+    # GPT-4o-audio-preview output
+    "gpt-4o-audio-preview-completion": 0.01,
+    "gpt-4o-audio-preview-2024-12-17-completion": 0.01,
+    "gpt-4o-audio-preview-2024-10-01-completion": 0.01,
+    # GPT-4o-realtime-preview input
+    "gpt-4o-realtime-preview": 0.005,
+    "gpt-4o-realtime-preview-2024-12-17": 0.005,
+    "gpt-4o-realtime-preview-2024-10-01": 0.005,
+    "gpt-4o-realtime-preview-cached": 0.0025,
+    "gpt-4o-realtime-preview-2024-12-17-cached": 0.0025,
+    "gpt-4o-realtime-preview-2024-10-01-cached": 0.0025,
+    # GPT-4o-realtime-preview output
+    "gpt-4o-realtime-preview-completion": 0.02,
+    "gpt-4o-realtime-preview-2024-12-17-completion": 0.02,
+    "gpt-4o-realtime-preview-2024-10-01-completion": 0.02,
+    # GPT-4o-mini input
+    "gpt-4o-mini": 0.00015,
+    "gpt-4o-mini-cached": 0.000075,
+    "gpt-4o-mini-2024-07-18": 0.00015,
+    "gpt-4o-mini-2024-07-18-cached": 0.000075,
+    # GPT-4o-mini output
+    "gpt-4o-mini-completion": 0.0006,
+    "gpt-4o-mini-2024-07-18-completion": 0.0006,
+    # GPT-4o-mini-audio-preview input
+    "gpt-4o-mini-audio-preview": 0.00015,
+    "gpt-4o-mini-audio-preview-2024-12-17": 0.00015,
+    # GPT-4o-mini-audio-preview output
+    "gpt-4o-mini-audio-preview-completion": 0.0006,
+    "gpt-4o-mini-audio-preview-2024-12-17-completion": 0.0006,
+    # GPT-4o-mini-realtime-preview input
+    "gpt-4o-mini-realtime-preview": 0.0006,
+    "gpt-4o-mini-realtime-preview-2024-12-17": 0.0006,
+    "gpt-4o-mini-realtime-preview-cached": 0.0003,
+    "gpt-4o-mini-realtime-preview-2024-12-17-cached": 0.0003,
+    # GPT-4o-mini-realtime-preview output
+    "gpt-4o-mini-realtime-preview-completion": 0.0024,
+    "gpt-4o-mini-realtime-preview-2024-12-17-completion": 0.0024,
+    # GPT-4o-mini-search-preview input
+    "gpt-4o-mini-search-preview": 0.00015,
+    "gpt-4o-mini-search-preview-2025-03-11": 0.00015,
+    # GPT-4o-mini-search-preview output
+    "gpt-4o-mini-search-preview-completion": 0.0006,
+    "gpt-4o-mini-search-preview-2025-03-11-completion": 0.0006,
+    # GPT-4o-search-preview input
+    "gpt-4o-search-preview": 0.0025,
+    "gpt-4o-search-preview-2025-03-11": 0.0025,
+    # GPT-4o-search-preview output
+    "gpt-4o-search-preview-completion": 0.01,
+    "gpt-4o-search-preview-2025-03-11-completion": 0.01,
+    # Computer-use-preview input
+    "computer-use-preview": 0.003,
+    "computer-use-preview-2025-03-11": 0.003,
+    # Computer-use-preview output
+    "computer-use-preview-completion": 0.012,
+    "computer-use-preview-2025-03-11-completion": 0.012,
     # GPT-4 input
     "gpt-4": 0.03,
     "gpt-4-0314": 0.03,
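For context, these rates are dollars per 1,000 tokens: bare keys price prompt tokens, "-cached" keys price cached prompt tokens, and "-completion" keys price output tokens. A minimal standalone sketch of how such a table turns token counts into a dollar cost; the dict literal below is a truncated stand-in for the full MODEL_COST_PER_1K_TOKENS mapping:

    # Truncated stand-in for the real cost table above.
    MODEL_COST_PER_1K_TOKENS = {
        "gpt-4o-mini": 0.00015,            # prompt tokens
        "gpt-4o-mini-cached": 0.000075,    # cached prompt tokens
        "gpt-4o-mini-completion": 0.0006,  # completion tokens
    }

    def token_cost(model_key: str, num_tokens: int) -> float:
        """Cost in USD for num_tokens under the given table key."""
        return MODEL_COST_PER_1K_TOKENS[model_key] * (num_tokens / 1000)

    # 10K prompt tokens plus 2K completion tokens on gpt-4o-mini:
    total = token_cost("gpt-4o-mini", 10_000) + token_cost("gpt-4o-mini-completion", 2_000)
    print(f"${total:.6f}")  # $0.002700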
@@ -218,12 +323,20 @@ def standardize_model_name(
         or model_name.startswith("gpt-3.5")
         or model_name.startswith("gpt-35")
         or model_name.startswith("o1-")
+        or model_name.startswith("o3-")
+        or model_name.startswith("o4-")
         or ("finetuned" in model_name and "legacy" not in model_name)
     ):
         return model_name + "-completion"
     if (
         token_type == TokenType.PROMPT_CACHED
-        and (model_name.startswith("gpt-4o") or model_name.startswith("o1"))
+        and (
+            model_name.startswith("gpt-4o")
+            or model_name.startswith("gpt-4.1")
+            or model_name.startswith("o1")
+            or model_name.startswith("o3")
+            or model_name.startswith("o4")
+        )
         and not (model_name.startswith("gpt-4o-2024-05-13"))
     ):
         return model_name + "-cached"
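The widened conditions route o3/o4 and gpt-4.1 model names onto the new "-completion" and "-cached" table keys. A trimmed, standalone illustration of the suffixing rules; TokenType here is a stand-in for the enum defined in openai_info.py, and only the branches patched above are shown:

    from enum import Enum

    class TokenType(Enum):  # trimmed stand-in for the real enum
        PROMPT = "prompt"
        PROMPT_CACHED = "prompt_cached"
        COMPLETION = "completion"

    def suffix_for(model_name: str, token_type: TokenType) -> str:
        """Trimmed illustration of the suffixing rules patched above."""
        if token_type == TokenType.COMPLETION and model_name.startswith(
            ("gpt-4", "gpt-3.5", "gpt-35", "o1-", "o3-", "o4-")
        ):
            return model_name + "-completion"
        if (
            token_type == TokenType.PROMPT_CACHED
            and model_name.startswith(("gpt-4o", "gpt-4.1", "o1", "o3", "o4"))
            and not model_name.startswith("gpt-4o-2024-05-13")
        ):
            return model_name + "-cached"
        return model_name

    print(suffix_for("o3-mini", TokenType.COMPLETION))     # o3-mini-completion
    print(suffix_for("gpt-4.1", TokenType.PROMPT_CACHED))  # gpt-4.1-cached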
@@ -580,7 +580,7 @@ class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
             self.temp_dir.cleanup()
             self.reset_callback_meta()
             if reset:
-                self.__init__(  # type: ignore
+                self.__init__(  # type: ignore[misc]
                     job_type=job_type if job_type else self.job_type,
                     project=project if project else self.project,
                     entity=entity if entity else self.entity,
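A recurring theme across this diff: bare "# type: ignore" comments are narrowed to coded ignores so mypy suppresses only the intended diagnostic. A hypothetical two-line snippet showing the difference:

    from typing import List

    x: List[int] = []

    # A bare ignore silences every mypy error on the line:
    x.append("oops")  # type: ignore

    # A coded ignore suppresses only the named diagnostic, so new,
    # unrelated errors on the same line are still reported:
    x.append("oops")  # type: ignore[arg-type]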
@@ -352,7 +352,7 @@ def create_structured_output_runnable(
     class _OutputFormatter(BaseModel):
         """Output formatter. Should always be used to format your response to the user."""  # noqa: E501

-        output: output_schema  # type: ignore
+        output: output_schema  # type: ignore[valid-type]

     function = _OutputFormatter
     output_parser = output_parser or PydanticAttrOutputFunctionsParser(

@@ -537,7 +537,7 @@ def create_structured_output_chain(
     class _OutputFormatter(BaseModel):
         """Output formatter. Should always be used to format your response to the user."""  # noqa: E501

-        output: output_schema  # type: ignore
+        output: output_schema  # type: ignore[valid-type]

     function = _OutputFormatter
     output_parser = output_parser or PydanticAttrOutputFunctionsParser(
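The "valid-type" code is the right one here because output_schema is a runtime variable used as an annotation, which mypy cannot treat as a type even though Pydantic resolves it at class-creation time. A hypothetical reduction of the pattern:

    from pydantic import BaseModel

    def make_formatter(output_schema: type) -> type:
        # Annotating a field with a variable triggers mypy's `valid-type`
        # error, but Pydantic evaluates the annotation fine at runtime:
        class _OutputFormatter(BaseModel):
            output: output_schema  # type: ignore[valid-type]

        return _OutputFormatter

    class Answer(BaseModel):
        text: str

    Formatter = make_formatter(Answer)
    print(Formatter(output=Answer(text="hi")).output.text)  # hi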
@@ -316,7 +316,7 @@ class GraphCypherQAChain(Chain):
                         MessagesPlaceholder(variable_name="function_response"),
                     ]
                 )
-                qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore
+                qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore[operator]
             except (NotImplementedError, AttributeError):
                 raise ValueError("Provided LLM does not support native tools/functions")
         else:

@@ -404,15 +404,15 @@ class GraphCypherQAChain(Chain):
         intermediate_steps.append({"context": context})
         if self.use_function_response:
             function_response = get_function_response(question, context)
-            final_result = self.qa_chain.invoke(  # type: ignore
+            final_result = self.qa_chain.invoke(  # type: ignore[assignment]
                 {"question": question, "function_response": function_response},
             )
         else:
-            result = self.qa_chain.invoke(  # type: ignore
+            result = self.qa_chain.invoke(
                 {"question": question, "context": context},
                 callbacks=callbacks,
             )
-            final_result = result[self.qa_chain.output_key]  # type: ignore
+            final_result = result[self.qa_chain.output_key]  # type: ignore[union-attr]

         chain_result: Dict[str, Any] = {self.output_key: final_result}
         if self.return_intermediate_steps:
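The "operator" ignore exists because mypy cannot always resolve the "|" overload when qa_llm is typed loosely; the composition itself is ordinary LCEL. A minimal sketch of the same prompt | llm | parser pipeline, using a fake list-backed chat model as a stand-in for qa_llm (any Runnable-compatible chat model would slot in):

    from langchain_core.language_models.fake_chat_models import FakeListChatModel
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_messages(
        [("system", "Answer from the given context."), ("human", "{question}")]
    )
    llm = FakeListChatModel(responses=["Berlin"])  # stand-in for qa_llm

    # The same `prompt | llm | parser` composition the chain builds:
    qa_chain = prompt | llm | StrOutputParser()
    print(qa_chain.invoke({"question": "Capital of Germany?"}))  # Berlin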
@@ -225,11 +225,11 @@ class MemgraphQAChain(Chain):
                         MessagesPlaceholder(variable_name="function_response"),
                     ]
                 )
-                qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore
+                qa_chain = response_prompt | qa_llm | StrOutputParser()  # type: ignore[operator]
             except (NotImplementedError, AttributeError):
                 raise ValueError("Provided LLM does not support native tools/functions")
         else:
-            qa_chain = use_qa_llm_kwargs["prompt"] | qa_llm | StrOutputParser()  # type: ignore
+            qa_chain = use_qa_llm_kwargs["prompt"] | qa_llm | StrOutputParser()

         prompt = use_cypher_llm_kwargs["prompt"]
         llm_to_use = cypher_llm if cypher_llm is not None else llm

@@ -300,11 +300,11 @@ class MemgraphQAChain(Chain):
         intermediate_steps.append({"context": context})
         if self.use_function_response:
             function_response = get_function_response(question, context)
-            result = self.qa_chain.invoke(  # type: ignore
+            result = self.qa_chain.invoke(
                 {"question": question, "function_response": function_response},
             )
         else:
-            result = self.qa_chain.invoke(  # type: ignore
+            result = self.qa_chain.invoke(
                 {"question": question, "context": context},
                 callbacks=callbacks,
             )
@@ -67,11 +67,11 @@ def extract_cypher(text: str) -> str:

 def use_simple_prompt(llm: BaseLanguageModel) -> bool:
     """Decides whether to use the simple prompt"""
-    if llm._llm_type and "anthropic" in llm._llm_type:  # type: ignore
+    if llm._llm_type and "anthropic" in llm._llm_type:  # type: ignore[attr-defined]
         return True

     # Bedrock anthropic
-    if hasattr(llm, "model_id") and "anthropic" in llm.model_id:  # type: ignore
+    if hasattr(llm, "model_id") and "anthropic" in llm.model_id:
         return True

     return False
@@ -313,8 +313,12 @@ class PebbloRetrievalQA(Chain):
         )

     @staticmethod
-    def _get_app_details(  # type: ignore
-        app_name: str, owner: str, description: str, llm: BaseLanguageModel, **kwargs
+    def _get_app_details(
+        app_name: str,
+        owner: str,
+        description: str,
+        llm: BaseLanguageModel,
+        **kwargs: Any,
     ) -> App:
         """Fetch app details. Internal method.
         Returns:
@@ -81,7 +81,7 @@ class CassandraChatMessageHistory(BaseChatMessageHistory):
         )

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve all session messages from DB"""
         # The latest are returned, in chronological order
         rows = self.table.get_partition(
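This same one-line change repeats across every chat-message-history integration below. The "override" code is the accurate one because BaseChatMessageHistory declares messages as a plain attribute while these subclasses re-declare it as a read-only property, which mypy treats as an incompatible override. A standalone reduction of the pattern, with hypothetical names:

    from typing import List

    class Base:
        messages: List[str]  # declared as a mutable attribute

    class DBBacked(Base):
        @property
        def messages(self) -> List[str]:  # type: ignore[override]
            # mypy flags this: a read-only property is a narrower
            # interface than the writable attribute it overrides.
            return ["loaded", "from", "storage"]

    print(DBBacked().messages)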
@@ -35,7 +35,7 @@ class FileChatMessageHistory(BaseChatMessageHistory):
         )

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve the messages from the local file"""
         items = json.loads(self.file_path.read_text(encoding=self.encoding))
         messages = messages_from_dict(items)

@@ -334,7 +334,7 @@ class KafkaChatMessageHistory(BaseChatMessageHistory):
         )

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """
         Retrieve the messages for the session, from Kafka topic continuously
         from last consumed message. This method is stateful and maintains

@@ -60,7 +60,7 @@ class MongoDBChatMessageHistory(BaseChatMessageHistory):
         self.collection.create_index("SessionId")

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve the messages from MongoDB"""
         from pymongo import errors

@@ -65,7 +65,7 @@ class PostgresChatMessageHistory(BaseChatMessageHistory):
         self.connection.commit()

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve the messages from PostgreSQL"""
         query = (
             f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;"

@@ -215,7 +215,7 @@ class RocksetChatMessageHistory(BaseChatMessageHistory):
         self._create_empty_doc()

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Messages in this chat history."""
         return messages_from_dict(
             self._query(

@@ -6,6 +6,7 @@ from typing import (
     List,
 )

+from langchain_core._api import deprecated
 from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_core.messages import (
     BaseMessage,
@@ -16,6 +17,18 @@ from langchain_core.messages import (
 logger = logging.getLogger(__name__)


+@deprecated(
+    since="0.3.22",
+    message=(
+        "This class is pending deprecation and may be removed in a future version. "
+        "You can swap to using the `SingleStoreChatMessageHistory` "
+        "implementation in `langchain_singlestore`. "
+        "See <https://github.com/singlestore-labs/langchain-singlestore> for details "
+        " about the new implementation."
+    ),
+    alternative="from langchain_singlestore import SingleStoreChatMessageHistory",
+    pending=True,
+)
 class SingleStoreDBChatMessageHistory(BaseChatMessageHistory):
     """Chat message history stored in a SingleStoreDB database."""

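For reference, langchain_core._api.deprecated wraps the class so that use emits a deprecation warning, and pending=True marks the removal as planned rather than scheduled. A hedged sketch on a toy class; the exact warning category is one of LangChain's internal (Pending)DeprecationWarning subclasses, which this snippet just prints:

    import warnings

    from langchain_core._api import deprecated

    @deprecated(
        since="0.0.1",
        alternative="NewThing",
        pending=True,  # pending: warns about a planned removal
    )
    class OldThing:
        """Toy class standing in for the real integration."""

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OldThing()

    print(caught[0].category.__name__)  # a deprecation-warning subclass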
@@ -212,7 +225,7 @@ class SingleStoreDBChatMessageHistory(BaseChatMessageHistory):
         conn.close()

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve the messages from SingleStoreDB"""
         self._create_table_if_not_exists()
         conn = self.connection_pool.connect()

@@ -47,7 +47,7 @@ try:
     from sqlalchemy.ext.asyncio import async_sessionmaker
 except ImportError:
     # dummy for sqlalchemy < 2
-    async_sessionmaker = type("async_sessionmaker", (type,), {})  # type: ignore
+    async_sessionmaker = type("async_sessionmaker", (type,), {})  # type: ignore[assignment,misc]

 logger = logging.getLogger(__name__)

@@ -242,7 +242,7 @@ class SQLChatMessageHistory(BaseChatMessageHistory):
         self._table_created = True

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve all messages from db"""
         with self._make_sync_session() as session:
             result = (

@@ -51,7 +51,7 @@ class UpstashRedisChatMessageHistory(BaseChatMessageHistory):
         return self.key_prefix + self.session_id

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve the messages from Upstash Redis"""
         _items = self.redis_client.lrange(self.key, 0, -1)
         items = [json.loads(m) for m in _items[::-1]]

@@ -83,7 +83,7 @@ class XataChatMessageHistory(BaseChatMessageHistory):
             raise Exception(f"Error adding message to Xata: {r.status_code} {r}")

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         r = self._client.data().query(
             self._table_name,
             payload={

@@ -87,7 +87,7 @@ class ZepChatMessageHistory(BaseChatMessageHistory):
         self.session_id = session_id

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve messages from Zep memory"""
         zep_memory: Optional[Memory] = self._get_memory()
         if not zep_memory:

@@ -134,7 +134,7 @@ class ZepCloudChatMessageHistory(BaseChatMessageHistory):
         self.summary_instruction = summary_instruction

     @property
-    def messages(self) -> List[BaseMessage]:  # type: ignore
+    def messages(self) -> List[BaseMessage]:  # type: ignore[override]
         """Retrieve messages from Zep memory"""
         zep_memory: Optional[Memory] = self._get_memory()
         if not zep_memory:
@@ -452,6 +452,7 @@ class ChatLiteLLM(BaseChatModel):
         params = {**params, **kwargs, "stream": True}

         default_chunk_class = AIMessageChunk
+        added_model_name = False
         for chunk in self.completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         ):
@@ -460,7 +461,15 @@ class ChatLiteLLM(BaseChatModel):
             if len(chunk["choices"]) == 0:
                 continue
             delta = chunk["choices"][0]["delta"]
+            usage = chunk.get("usage", {})
             chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            if isinstance(chunk, AIMessageChunk):
+                if not added_model_name:
+                    chunk.response_metadata = {
+                        "model_name": self.model_name or self.model
+                    }
+                    added_model_name = True
+                chunk.usage_metadata = _create_usage_metadata(usage)
             default_chunk_class = chunk.__class__
             cg_chunk = ChatGenerationChunk(message=chunk)
             if run_manager:
@@ -478,6 +487,7 @@ class ChatLiteLLM(BaseChatModel):
         params = {**params, **kwargs, "stream": True}

         default_chunk_class = AIMessageChunk
+        added_model_name = False
         async for chunk in await acompletion_with_retry(
             self, messages=message_dicts, run_manager=run_manager, **params
         ):
@@ -486,7 +496,15 @@ class ChatLiteLLM(BaseChatModel):
             if len(chunk["choices"]) == 0:
                 continue
             delta = chunk["choices"][0]["delta"]
+            usage = chunk.get("usage", {})
             chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            if isinstance(chunk, AIMessageChunk):
+                if not added_model_name:
+                    chunk.response_metadata = {
+                        "model_name": self.model_name or self.model
+                    }
+                    added_model_name = True
+                chunk.usage_metadata = _create_usage_metadata(usage)
             default_chunk_class = chunk.__class__
             cg_chunk = ChatGenerationChunk(message=chunk)
             if run_manager:
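The _create_usage_metadata helper is referenced but not shown in these hunks. A plausible reduction of what it does, built on langchain_core's UsageMetadata TypedDict, under the assumption that LiteLLM usage dicts carry OpenAI-style prompt_tokens/completion_tokens/total_tokens keys:

    from langchain_core.messages.ai import UsageMetadata

    def create_usage_metadata(usage: dict) -> UsageMetadata:
        # Map a LiteLLM/OpenAI-style usage dict onto LangChain's
        # UsageMetadata shape (hypothetical reduction of the
        # _create_usage_metadata helper referenced in the diff).
        input_tokens = usage.get("prompt_tokens", 0)
        output_tokens = usage.get("completion_tokens", 0)
        return UsageMetadata(
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            total_tokens=usage.get("total_tokens", input_tokens + output_tokens),
        )

    print(create_usage_metadata({"prompt_tokens": 12, "completion_tokens": 3}))

Setting response_metadata only on the first AIMessageChunk avoids repeating the model name on every streamed chunk when chunks are merged downstream.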
@@ -42,7 +42,7 @@ class ChatLiteLLMRouter(ChatLiteLLM):

     def __init__(self, *, router: Any, **kwargs: Any) -> None:
         """Construct Chat LiteLLM Router."""
-        super().__init__(router=router, **kwargs)  # type: ignore
+        super().__init__(router=router, **kwargs)  # type: ignore[call-arg]
         self.router = router

     @property

@@ -815,4 +815,4 @@ def _convert_delta_to_message_chunk(
     elif role or default_class == ChatMessageChunk:
         return ChatMessageChunk(content=content, role=role, id=id_)
     else:
-        return default_class(content=content, id=id_)  # type: ignore
+        return default_class(content=content, id=id_)  # type: ignore[call-arg]
@@ -716,7 +716,7 @@ class ChatOCIGenAI(BaseChatModel, OCIGenAIBase):
         if is_pydantic_schema:
             output_parser: OutputParserLike = PydanticToolsParser(
                 tools=[schema],  # type: ignore[list-item]
-                first_tool_only=True,  # type: ignore[list-item]
+                first_tool_only=True,
             )
         else:
             output_parser = JsonOutputKeyToolsParser(
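Dropping the ignore on first_tool_only is correct: that argument is a plain bool, so only the tools list needs the list-item suppression. A small sketch of the parser in isolation; the AIMessage below is hand-built rather than a live model response:

    from langchain_core.messages import AIMessage
    from langchain_core.output_parsers.openai_tools import PydanticToolsParser
    from pydantic import BaseModel

    class Person(BaseModel):
        name: str
        age: int

    parser = PydanticToolsParser(tools=[Person], first_tool_only=True)

    # Hand-built AIMessage carrying one tool call, as a chat model would return:
    msg = AIMessage(
        content="",
        tool_calls=[{"name": "Person", "args": {"name": "Ada", "age": 36}, "id": "1"}],
    )
    print(parser.invoke(msg))  # name='Ada' age=36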
@@ -158,9 +158,9 @@ def _convert_delta_response_to_message_chunk(
     Optional[str],
 ]:
     """Converts delta response to message chunk"""
-    _delta = response.choices[0].delta  # type: ignore
-    role = _delta.get("role", "")  # type: ignore
-    content = _delta.get("content", "")  # type: ignore
+    _delta = response.choices[0].delta
+    role = _delta.get("role", "")
+    content = _delta.get("content", "")
     additional_kwargs: Dict = {}
     finish_reasons: Optional[str] = response.choices[0].finish_reason

@@ -398,7 +398,7 @@ class ChatPremAI(BaseChatModel, BaseModel):
                 messages, template_id=kwargs["template_id"]
             )
         else:
-            system_prompt, messages_to_pass = _messages_to_prompt_dict(messages)  # type: ignore
+            system_prompt, messages_to_pass = _messages_to_prompt_dict(messages)

         if system_prompt is not None and system_prompt != "":
             kwargs["system_prompt"] = system_prompt
@@ -425,9 +425,9 @@ class ChatPremAI(BaseChatModel, BaseModel):
         if "template_id" in kwargs:
             system_prompt, messages_to_pass = _messages_to_prompt_dict(
                 messages, template_id=kwargs["template_id"]
-            )  # type: ignore
+            )
         else:
-            system_prompt, messages_to_pass = _messages_to_prompt_dict(messages)  # type: ignore
+            system_prompt, messages_to_pass = _messages_to_prompt_dict(messages)

         if stop is not None:
             logger.warning("stop is not supported in langchain streaming")
@@ -218,7 +218,7 @@ class BlackboardLoader(WebBaseLoader):
         loader = DirectoryLoader(
             path=self.folder_path,
             glob="*.pdf",
-            loader_cls=PyPDFLoader,  # type: ignore
+            loader_cls=PyPDFLoader,  # type: ignore[arg-type]
         )
         # Load the documents
         documents = loader.load()
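Here the "arg-type" code pins the ignore to the loader_cls mismatch that mypy reports. Standalone usage of the same loader combination, with "./downloads" as a placeholder path:

    from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader

    # Load every PDF under a folder with PyPDFLoader, as BlackboardLoader
    # does internally; "./downloads" is a placeholder path.
    loader = DirectoryLoader(
        path="./downloads",
        glob="*.pdf",
        loader_cls=PyPDFLoader,  # type: ignore[arg-type]
    )
    documents = loader.load()
    print(len(documents))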
Some files were not shown because too many files have changed in this diff.