Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-18 04:25:22 +00:00)
Compare commits
410 Commits
erick/rele...bagatur/re
| Author | SHA1 | Date |
|---|---|---|

410 commits in this range, from d5e2bf462a through f111efeb6e; the captured table retained only the SHA1 values (author, message, and date cells were empty).
.devcontainer/devcontainer.json

```diff
@@ -12,7 +12,7 @@
 	// The optional 'workspaceFolder' property is the path VS Code should open by default when
 	// connected. This is typically a file mount in .devcontainer/docker-compose.yml
-	"workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
+	"workspaceFolder": "/workspaces/langchain",
 
 	// Prevent the container from shutting down
 	"overrideCommand": true
```
.devcontainer/docker-compose.yml

```diff
@@ -6,7 +6,7 @@ services:
       context: ..
     volumes:
       # Update this to wherever you want VS Code to mount the folder of your project
-      - ..:/workspaces:cached
+      - ..:/workspaces/langchain:cached
     networks:
       - langchain-network
     # environment:
```
.github/workflows/_release.yml (vendored, 26 lines changed)

```diff
@@ -13,6 +13,11 @@ on:
         required: true
         type: string
         default: 'libs/langchain'
+      dangerous-nonmaster-release:
+        required: false
+        type: boolean
+        default: false
+        description: "Release from a non-master branch (danger!)"
 
 env:
   PYTHON_VERSION: "3.11"
@@ -20,7 +25,7 @@ env:
 
 jobs:
   build:
-    if: github.ref == 'refs/heads/master'
+    if: github.ref == 'refs/heads/master' || inputs.dangerous-nonmaster-release
     environment: Scheduled testing
     runs-on: ubuntu-latest
@@ -73,8 +78,10 @@ jobs:
       - build
     uses: ./.github/workflows/_test_release.yml
+    permissions: write-all
     with:
       working-directory: ${{ inputs.working-directory }}
+      dangerous-nonmaster-release: ${{ inputs.dangerous-nonmaster-release }}
     secrets: inherit
 
   pre-release-checks:
@@ -112,7 +119,7 @@ jobs:
       PKG_NAME: ${{ needs.build.outputs.pkg-name }}
       VERSION: ${{ needs.build.outputs.version }}
       # Here we use:
-      # - The default regular PyPI index as the *primary* index, meaning
+      # - The default regular PyPI index as the *primary* index, meaning
       #   that it takes priority (https://pypi.org/simple)
       # - The test PyPI index as an extra index, so that any dependencies that
       #   are not found on test PyPI can be resolved and installed anyway.
@@ -171,7 +178,7 @@ jobs:
     env:
       MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
     run: |
-      poetry run pip install $MIN_VERSIONS
+      poetry run pip install --force-reinstall $MIN_VERSIONS
       make tests
     working-directory: ${{ inputs.working-directory }}
@@ -291,14 +298,13 @@ jobs:
         with:
           name: dist
           path: ${{ inputs.working-directory }}/dist/
 
-      - name: Create Release
+      - name: Create Tag
         uses: ncipollo/release-action@v1
-        if: ${{ inputs.working-directory == 'libs/langchain' }}
         with:
           artifacts: "dist/*"
           token: ${{ secrets.GITHUB_TOKEN }}
           draft: false
-          generateReleaseNotes: true
-          tag: v${{ needs.build.outputs.version }}
-          commit: master
+          generateReleaseNotes: false
+          tag: ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}
+          body: "# Release ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}\n\nPackage-specific release note generation coming soon."
+          commit: ${{ github.sha }}
```
.github/workflows/_release_note_experiments.yml (vendored, deleted, 59 lines)

```diff
@@ -1,59 +0,0 @@
-name: release note experiments
-run-name: Release note for ${{ inputs.working-directory }} by @${{ github.actor }}
-on:
-  workflow_dispatch:
-    inputs:
-      working-directory:
-        required: true
-        type: string
-        default: 'libs/langchain'
-
-env:
-  PYTHON_VERSION: "3.11"
-  POETRY_VERSION: "1.7.1"
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-
-    outputs:
-      pkg-name: ${{ steps.check-version.outputs.pkg-name }}
-      version: ${{ steps.check-version.outputs.version }}
-
-    steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
-        uses: "./.github/actions/poetry_setup"
-        with:
-          python-version: ${{ env.PYTHON_VERSION }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: ${{ inputs.working-directory }}
-          cache-key: release
-      - name: Check Version
-        id: check-version
-        shell: bash
-        working-directory: ${{ inputs.working-directory }}
-        run: |
-          echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
-          echo version="$(poetry version --short)" >> $GITHUB_OUTPUT
-  release-notes:
-    needs:
-      - build
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
-        uses: "./.github/actions/poetry_setup"
-        with:
-          python-version: ${{ env.PYTHON_VERSION }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: ${{ inputs.working-directory }}
-          cache-key: release
-      - name: Generate Release Notes
-        env:
-          TAG_NAME: ${{ needs.build.outputs.pkg-name }}-v${{ needs.build.outputs.version }}
-          RELEASE_NAME: ${{ needs.build.outputs.pkg-name }}==${{ needs.build.outputs.version }}
-        run: |
-          echo "TAG_NAME=${TAG_NAME}"
-          echo "RELEASE_NAME=${RELEASE_NAME}"
```
.github/workflows/_test_release.yml (vendored, 7 lines changed)

```diff
@@ -7,6 +7,11 @@ on:
         required: true
         type: string
         description: "From which folder this pipeline executes"
+      dangerous-nonmaster-release:
+        required: false
+        type: boolean
+        default: false
+        description: "Release from a non-master branch (danger!)"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -14,7 +19,7 @@ env:
 
 jobs:
   build:
-    if: github.ref == 'refs/heads/master'
+    if: github.ref == 'refs/heads/master' || inputs.dangerous-nonmaster-release
     runs-on: ubuntu-latest
 
     outputs:
```
.github/workflows/codespell.yml (vendored, 4 lines changed)

```diff
@@ -3,9 +3,9 @@ name: CI / cd . / make spell_check
 
 on:
   push:
-    branches: [master]
+    branches: [master, v0.1]
   pull_request:
-    branches: [master]
+    branches: [master, v0.1]
 
 permissions:
   contents: read
```
.github/workflows/scheduled_test.yml (vendored, 6 lines changed)

```diff
@@ -19,11 +19,11 @@ jobs:
         working-directory:
           - "libs/partners/openai"
           - "libs/partners/anthropic"
-          # - "libs/partners/ai21" # standard-tests broken
+          - "libs/partners/ai21"
           - "libs/partners/fireworks"
-          # - "libs/partners/groq" # rate-limited
+          - "libs/partners/groq"
           - "libs/partners/mistralai"
-          # - "libs/partners/together" # rate-limited
+          - "libs/partners/together"
     name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
     steps:
       - uses: actions/checkout@v4
```
Makefile (15 lines changed)

```diff
@@ -17,16 +17,11 @@ clean: docs_clean api_docs_clean
 
 ## docs_build: Build the documentation.
 docs_build:
-	docs/.local_build.sh
+	cd docs && make build
 
 ## docs_clean: Clean the documentation build artifacts.
 docs_clean:
-	@if [ -d _dist ]; then \
-		rm -r _dist; \
-		echo "Directory _dist has been cleaned."; \
-	else \
-		echo "Nothing to clean."; \
-	fi
+	cd docs && make clean
 
 ## docs_linkcheck: Run linkchecker on the documentation.
 docs_linkcheck:
@@ -60,12 +55,12 @@ spell_fix:
 
 ## lint: Run linting on the project.
 lint lint_package lint_tests:
-	poetry run ruff docs templates cookbook
+	poetry run ruff check docs templates cookbook
 	poetry run ruff format docs templates cookbook --diff
-	poetry run ruff --select I docs templates cookbook
+	poetry run ruff check --select I docs templates cookbook
 	git grep 'from langchain import' docs/docs templates cookbook | grep -vE 'from langchain import (hub)' && exit 1 || exit 0
 
 ## format: Format the project files.
 format format_diff:
 	poetry run ruff format docs templates cookbook
-	poetry run ruff --select I --fix docs templates cookbook
+	poetry run ruff check --select I --fix docs templates cookbook
```
README.md

```diff
@@ -47,7 +47,7 @@ For these applications, LangChain simplifies the entire application lifecycle:
 - **`langchain-community`**: Third party integrations.
   - Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
 - **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
-- **[LangGraph](https://python.langchain.com/docs/langgraph)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
+- **[`LangGraph`](https://python.langchain.com/docs/langgraph)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
 
 ### Productionization:
 - **[LangSmith](https://python.langchain.com/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.
```
```diff
@@ -464,8 +464,8 @@
     Check if the base64 data is an image by looking at the start of the data
     """
     image_signatures = {
-        b"\xFF\xD8\xFF": "jpg",
-        b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
+        b"\xff\xd8\xff": "jpg",
+        b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a": "png",
         b"\x47\x49\x46\x38": "gif",
         b"\x52\x49\x46\x46": "webp",
     }
```
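Both sides of this hunk behave identically: hex digits in Python byte escapes are case-insensitive, so `b"\xFF\xD8\xFF" == b"\xff\xd8\xff"` is `True` and the edit is purely stylistic. For orientation, here is a minimal self-contained sketch of the helper these notebook hunks keep touching, reassembled from the visible lines (the notebooks' actual function may differ slightly):

```python
import base64


def is_image_data(b64data: str) -> bool:
    """Check if base64 data encodes an image by inspecting its magic bytes."""
    image_signatures = {
        b"\xff\xd8\xff": "jpg",
        b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a": "png",
        b"\x47\x49\x46\x38": "gif",
        b"\x52\x49\x46\x46": "webp",
    }
    try:
        header = base64.b64decode(b64data)[:8]  # longest signature is 8 bytes
        return any(header.startswith(sig) for sig in image_signatures)
    except Exception:
        return False


# A base64-encoded PNG header is recognized:
png_b64 = base64.b64encode(b"\x89PNG\r\n\x1a\n" + b"\x00" * 8).decode()
print(is_image_data(png_b64))  # True
```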
```diff
@@ -604,7 +604,7 @@
 # Check retrieval
 query = "Give me company names that are interesting investments based on EV / NTM and NTM rev growth. Consider EV / NTM multiples vs historical?"
-docs = retriever_multi_vector_img.get_relevant_documents(query, limit=6)
+docs = retriever_multi_vector_img.invoke(query, limit=6)
 
 # We get 4 docs
 len(docs)
```
```diff
@@ -630,7 +630,7 @@
 # Check retrieval
 query = "What are the EV / NTM and NTM rev growth for MongoDB, Cloudflare, and Datadog?"
-docs = retriever_multi_vector_img.get_relevant_documents(query, limit=6)
+docs = retriever_multi_vector_img.invoke(query, limit=6)
 
 # We get 4 docs
 len(docs)
```
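The recurring `get_relevant_documents` → `invoke` substitution throughout these hunks reflects LangChain 0.1.x deprecating the retriever-specific method in favor of the shared Runnable interface. A minimal sketch of the migration, using a toy retriever so it runs without a vector store (`ToyRetriever` is illustrative, not part of the changeset):

```python
from typing import List

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class ToyRetriever(BaseRetriever):
    """Returns a canned Document; stands in for the notebooks' real retrievers."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return [Document(page_content=f"stub result for: {query}")]


retriever = ToyRetriever()
# docs = retriever.get_relevant_documents("EV / NTM multiples")  # deprecated style
docs = retriever.invoke("EV / NTM multiples")  # Runnable style used in this changeset
print(docs[0].page_content)
```

Both calls return the same list of Documents; `invoke` additionally composes with LCEL pipelines, which is why the cookbooks standardize on it.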
```diff
@@ -185,7 +185,7 @@
     )
     # Text summary chain
     model = VertexAI(
-        temperature=0, model_name="gemini-pro", max_output_tokens=1024
+        temperature=0, model_name="gemini-pro", max_tokens=1024
     ).with_fallbacks([empty_response])
     summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
```
```diff
@@ -254,9 +254,9 @@
 
 def image_summarize(img_base64, prompt):
     """Make image summary"""
-    model = ChatVertexAI(model_name="gemini-pro-vision", max_output_tokens=1024)
+    model = ChatVertexAI(model="gemini-pro-vision", max_tokens=1024)
 
-    msg = model(
+    msg = model.invoke(
         [
             HumanMessage(
                 content=[
```
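The Vertex AI hunks track two parallel renames: the constructor moves to the standardized aliases (`model=` for `model_name=`, `max_tokens=` for `max_output_tokens=`), and bare `model(...)` calls become `model.invoke(...)`. A hedged sketch of the resulting call, assuming `langchain-google-vertexai` is installed and Google Cloud Application Default Credentials are configured:

```python
from langchain_google_vertexai import ChatVertexAI

# Standardized aliases; the left-hand sides of these hunks used the older
# model_name= / max_output_tokens= spellings, which remain accepted.
model = ChatVertexAI(model="gemini-pro-vision", temperature=0, max_tokens=1024)
msg = model.invoke("Summarize this image-captioning task in one sentence.")
print(msg.content)
```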
```diff
@@ -462,8 +462,8 @@
     Check if the base64 data is an image by looking at the start of the data
     """
     image_signatures = {
-        b"\xFF\xD8\xFF": "jpg",
-        b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
+        b"\xff\xd8\xff": "jpg",
+        b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a": "png",
         b"\x47\x49\x46\x38": "gif",
         b"\x52\x49\x46\x46": "webp",
     }
```
```diff
@@ -553,9 +553,7 @@
     """
 
     # Multi-modal LLM
-    model = ChatVertexAI(
-        temperature=0, model_name="gemini-pro-vision", max_output_tokens=1024
-    )
+    model = ChatVertexAI(temperature=0, model_name="gemini-pro-vision", max_tokens=1024)
 
     # RAG pipeline
     chain = (
```
```diff
@@ -604,7 +602,7 @@
 query = "What are the EV / NTM and NTM rev growth for MongoDB, Cloudflare, and Datadog?"
-docs = retriever_multi_vector_img.get_relevant_documents(query, limit=1)
+docs = retriever_multi_vector_img.invoke(query, limit=1)
 
 # We get 2 docs
 len(docs)
```
cookbook/README.md

```diff
@@ -47,6 +47,7 @@ Notebook | Description
 [press_releases.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/press_releases.ipynb) | Retrieve and query company press release data powered by [Kay.ai](https://kay.ai).
 [program_aided_language_model.i...](https://github.com/langchain-ai/langchain/tree/master/cookbook/program_aided_language_model.ipynb) | Implement program-aided language models as described in the provided research paper.
 [qa_citations.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/qa_citations.ipynb) | Different ways to get a model to cite its sources.
+[rag_upstage_layout_analysis_groundedness_check.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag_upstage_layout_analysis_groundedness_check.ipynb) | End-to-end RAG example using Upstage Layout Analysis and Groundedness Check.
 [retrieval_in_sql.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/retrieval_in_sql.ipynb) | Perform retrieval-augmented-generation (rag) on a PostgreSQL database using pgvector.
 [sales_agent_with_context.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/sales_agent_with_context.ipynb) | Implement a context-aware ai sales agent, salesgpt, that can have natural sales conversations, interact with other systems, and use a product knowledge base to discuss a company's offerings.
 [self_query_hotel_search.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/self_query_hotel_search.ipynb) | Build a hotel room search feature with self-querying retrieval, using a specific hotel recommendation dataset.
@@ -56,3 +57,4 @@ Notebook | Description
 [two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
 [two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
 [wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
+[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
```
```diff
@@ -75,7 +75,7 @@
 
 Apply to the [`LLaMA2`](https://arxiv.org/pdf/2307.09288.pdf) paper. 
 
-We use the Unstructured [`partition_pdf`](https://unstructured-io.github.io/unstructured/bricks/partition.html#partition-pdf), which segments a PDF document by using a layout model. 
+We use the Unstructured [`partition_pdf`](https://unstructured-io.github.io/unstructured/core/partition.html#partition-pdf), which segments a PDF document by using a layout model. 
 
 This layout model makes it possible to extract elements, such as tables, from pdfs. 
```
```diff
@@ -562,9 +562,7 @@
 # We can retrieve this table
-retriever.get_relevant_documents(
-    "What are results for LLaMA across across domains / subjects?"
-)[1]
+retriever.invoke("What are results for LLaMA across across domains / subjects?")[1]
```
```diff
@@ -614,9 +612,7 @@
-retriever.get_relevant_documents("Images / figures with playful and creative examples")[
-    1
-]
+retriever.invoke("Images / figures with playful and creative examples")[1]
```
```diff
@@ -501,9 +501,7 @@
-retriever.get_relevant_documents("Images / figures with playful and creative examples")[
-    0
-]
+retriever.invoke("Images / figures with playful and creative examples")[0]
```
```diff
@@ -342,7 +342,7 @@
 # Testing on retrieval
 query = "What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?"
 suffix_for_images = " Include any pie charts, graphs, or tables."
-docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images)
+docs = retriever_multi_vector_img.invoke(query + suffix_for_images)
```
```diff
@@ -532,8 +532,8 @@
 def is_image_data(b64data):
     """Check if the base64 data is an image by looking at the start of the data."""
     image_signatures = {
-        b"\xFF\xD8\xFF": "jpg",
-        b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
+        b"\xff\xd8\xff": "jpg",
+        b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a": "png",
         b"\x47\x49\x46\x38": "gif",
         b"\x52\x49\x46\x46": "webp",
     }
```
```diff
@@ -90,7 +90,7 @@
     ) -> AIMessage:
         messages = self.update_messages(input_message)
 
-        output_message = self.model(messages)
+        output_message = self.model.invoke(messages)
         self.update_messages(output_message)
 
         return output_message
```
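The same Runnable migration applies to chat models: calling the model object directly (`self.model(messages)`) was the pre-0.1 `__call__` style, and `.invoke` is its replacement. A runnable sketch using langchain-core's fake chat model, which needs no API key (`FakeListChatModel` is a test utility, not part of this changeset):

```python
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage

model = FakeListChatModel(responses=["canned reply"])
messages = [HumanMessage(content="hello")]

# output = model(messages)       # legacy __call__ style being removed above
output = model.invoke(messages)  # Runnable style
print(output.content)            # -> "canned reply"
```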
cookbook/cql_agent.ipynb (new file, 557 lines)

## Setup Environment

### Python Modules

Install the following Python modules:

```bash
pip install ipykernel python-dotenv cassio pandas langchain_openai langchain langchain-community langchainhub langchain_experimental openai-multi-tool-use-parallel-patch
```

### Load the `.env` File

Connection is via `cassio` using the `auto=True` parameter, and the notebook uses OpenAI, so create a `.env` file accordingly.

For Cassandra, set:
```bash
CASSANDRA_CONTACT_POINTS
CASSANDRA_USERNAME
CASSANDRA_PASSWORD
CASSANDRA_KEYSPACE
```

For Astra, set:
```bash
ASTRA_DB_APPLICATION_TOKEN
ASTRA_DB_DATABASE_ID
ASTRA_DB_KEYSPACE
```

For example:

```bash
# Connection to Astra:
ASTRA_DB_DATABASE_ID=a1b2c3d4-...
ASTRA_DB_APPLICATION_TOKEN=AstraCS:...
ASTRA_DB_KEYSPACE=notebooks

# Also set 
OPENAI_API_KEY=sk-....
```

(You may also modify the code below to connect directly with `cassio`.)

```python
from dotenv import load_dotenv

load_dotenv(override=True)
```

### Connect to Cassandra

```python
import os

import cassio

cassio.init(auto=True)
session = cassio.config.resolve_session()
if not session:
    raise Exception(
        "Check environment configuration or manually configure cassio connection parameters"
    )

keyspace = os.environ.get(
    "ASTRA_DB_KEYSPACE", os.environ.get("CASSANDRA_KEYSPACE", None)
)
if not keyspace:
    raise ValueError("a KEYSPACE environment variable must be set")

session.set_keyspace(keyspace)
```

## Setup Database

This needs to be done only once!

### Download Data

The dataset used is Kaggle's [Environmental Sensor Telemetry Data](https://www.kaggle.com/datasets/garystafford/environmental-sensor-data-132k?select=iot_telemetry_data.csv). The next cell downloads and unzips the data into a Pandas dataframe; the cell after it explains how to download manually instead.

The net result of this section is that you should have a Pandas dataframe variable `df`.

#### Download Automatically

```python
from io import BytesIO
from zipfile import ZipFile

import pandas as pd
import requests

datasetURL = "https://storage.googleapis.com/kaggle-data-sets/788816/1355729/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20240404%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20240404T115828Z&X-Goog-Expires=259200&X-Goog-SignedHeaders=host&X-Goog-Signature=2849f003b100eb9dcda8dd8535990f51244292f67e4f5fad36f14aa67f2d4297672d8fe6ff5a39f03a29cda051e33e95d36daab5892b8874dcd5a60228df0361fa26bae491dd4371f02dd20306b583a44ba85a4474376188b1f84765147d3b4f05c57345e5de883c2c29653cce1f3755cd8e645c5e952f4fb1c8a735b22f0c811f97f7bce8d0235d0d3731ca8ab4629ff381f3bae9e35fc1b181c1e69a9c7913a5e42d9d52d53e5f716467205af9c8a3cc6746fc5352e8fbc47cd7d18543626bd67996d18c2045c1e475fc136df83df352fa747f1a3bb73e6ba3985840792ec1de407c15836640ec96db111b173bf16115037d53fdfbfd8ac44145d7f9a546aa"

response = requests.get(datasetURL)
# Fail fast so zip_file is never referenced when the download did not succeed
if response.status_code != 200:
    raise RuntimeError("Failed to download the file")

zip_file = ZipFile(BytesIO(response.content))
csv_file_name = zip_file.namelist()[0]

with zip_file.open(csv_file_name) as csv_file:
    df = pd.read_csv(csv_file)
```

#### Download Manually

You can download the `.zip` file and unpack the `.csv` contained within. Uncomment the next line, and adjust the path to this `.csv` file appropriately.

```python
# df = pd.read_csv("/path/to/iot_telemetry_data.csv")
```

### Load Data into Cassandra

This section assumes the existence of a dataframe `df`; the following cell validates its structure. The Download section above creates this object.

```python
assert df is not None, "Dataframe 'df' must be set"
expected_columns = [
    "ts",
    "device",
    "co",
    "humidity",
    "light",
    "lpg",
    "motion",
    "smoke",
    "temp",
]
assert all(
    [column in df.columns for column in expected_columns]
), "DataFrame does not have the expected columns"
```

Create and load tables:

```python
from datetime import UTC, datetime

from cassandra.query import BatchStatement

# Create sensors table
table_query = """
CREATE TABLE IF NOT EXISTS iot_sensors (
    device text,
    conditions text,
    room text,
    PRIMARY KEY (device)
)
WITH COMMENT = 'Environmental IoT room sensor metadata.';
"""
session.execute(table_query)

pstmt = session.prepare(
    """
INSERT INTO iot_sensors (device, conditions, room)
VALUES (?, ?, ?)
"""
)

devices = [
    ("00:0f:00:70:91:0a", "stable conditions, cooler and more humid", "room 1"),
    ("1c:bf:ce:15:ec:4d", "highly variable temperature and humidity", "room 2"),
    ("b8:27:eb:bf:9d:51", "stable conditions, warmer and dryer", "room 3"),
]

for device, conditions, room in devices:
    session.execute(pstmt, (device, conditions, room))

print("Sensors inserted successfully.")

# Create data table
table_query = """
CREATE TABLE IF NOT EXISTS iot_data (
    day text,
    device text,
    ts timestamp,
    co double,
    humidity double,
    light boolean,
    lpg double,
    motion boolean,
    smoke double,
    temp double,
    PRIMARY KEY ((day, device), ts)
)
WITH COMMENT = 'Data from environmental IoT room sensors. Columns include device identifier, timestamp (ts) of the data collection, carbon monoxide level (co), relative humidity, light presence, LPG concentration, motion detection, smoke concentration, and temperature (temp). Data is partitioned by day and device.';
"""
session.execute(table_query)

pstmt = session.prepare(
    """
INSERT INTO iot_data (day, device, ts, co, humidity, light, lpg, motion, smoke, temp)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
"""
)


def insert_data_batch(name, group):
    batch = BatchStatement()
    day, device = name
    print(f"Inserting batch for day: {day}, device: {device}")

    for _, row in group.iterrows():
        timestamp = datetime.fromtimestamp(row["ts"], UTC)
        batch.add(
            pstmt,
            (
                day,
                row["device"],
                timestamp,
                row["co"],
                row["humidity"],
                row["light"],
                row["lpg"],
                row["motion"],
                row["smoke"],
                row["temp"],
            ),
        )

    session.execute(batch)


# Convert columns to appropriate types
df["light"] = df["light"] == "true"
df["motion"] = df["motion"] == "true"
df["ts"] = df["ts"].astype(float)
df["day"] = df["ts"].apply(
    lambda x: datetime.fromtimestamp(x, UTC).strftime("%Y-%m-%d")
)

grouped_df = df.groupby(["day", "device"])

for name, group in grouped_df:
    insert_data_batch(name, group)

print("Data load complete")
```

```python
print(session.keyspace)
```

## Load the Tools

Python `import` statements for the demo:

```python
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_community.agent_toolkits.cassandra_database.toolkit import (
    CassandraDatabaseToolkit,
)
from langchain_community.tools.cassandra_database.prompt import QUERY_PATH_PROMPT
from langchain_community.tools.cassandra_database.tool import (
    GetSchemaCassandraDatabaseTool,
    GetTableDataCassandraDatabaseTool,
    QueryCassandraDatabaseTool,
)
from langchain_community.utilities.cassandra_database import CassandraDatabase
from langchain_openai import ChatOpenAI
```

The `CassandraDatabase` object is loaded from `cassio`, though it also accepts a `Session`-type parameter as an alternative.

```python
# Create a CassandraDatabase instance
db = CassandraDatabase(include_tables=["iot_sensors", "iot_data"])

# Create the Cassandra Database tools
query_tool = QueryCassandraDatabaseTool(db=db)
schema_tool = GetSchemaCassandraDatabaseTool(db=db)
select_data_tool = GetTableDataCassandraDatabaseTool(db=db)
```

The tools can be invoked directly:

```python
# Test the tools
print("Executing a CQL query:")
query = "SELECT * FROM iot_sensors LIMIT 5;"
result = query_tool.run({"query": query})
print(result)

print("\nGetting the schema for a keyspace:")
schema = schema_tool.run({"keyspace": keyspace})
print(schema)

print("\nGetting data from a table:")
table = "iot_data"
predicate = "day = '2020-07-14' and device = 'b8:27:eb:bf:9d:51'"
data = select_data_tool.run(
    {"keyspace": keyspace, "table": table, "predicate": predicate, "limit": 5}
)
print(data)
```

## Agent Configuration

```python
from langchain.agents import Tool
from langchain_experimental.utilities import PythonREPL

python_repl = PythonREPL()

repl_tool = Tool(
    name="python_repl",
    description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
    func=python_repl.run,
)
```

```python
from langchain import hub

llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
toolkit = CassandraDatabaseToolkit(db=db)

# context = toolkit.get_context()
# tools = toolkit.get_tools()
tools = [schema_tool, select_data_tool, repl_tool]

input = (
    QUERY_PATH_PROMPT
    + f"""

Here is your task: In the {keyspace} keyspace, find the total number of times the temperature of each device has exceeded 23 degrees on July 14, 2020.
 Create a summary report including the name of the room. Use Pandas if helpful.
"""
)

prompt = hub.pull("hwchase17/openai-tools-agent")

# messages = [
#     HumanMessagePromptTemplate.from_template(input),
#     AIMessage(content=QUERY_PATH_PROMPT),
#     MessagesPlaceholder(variable_name="agent_scratchpad"),
# ]

# prompt = ChatPromptTemplate.from_messages(messages)
# print(prompt)

# Choose the LLM that will drive the agent
# Only certain models support this; note this assignment overrides the
# gpt-4 model created above
llm = ChatOpenAI(model="gpt-3.5-turbo-1106", temperature=0)

# Construct the OpenAI Tools agent
agent = create_openai_tools_agent(llm, tools, prompt)

print("Available tools:")
for tool in tools:
    print("\t" + tool.name + " - " + tool.description + " - " + str(tool))
```

```python
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

response = agent_executor.invoke({"input": input})

print(response["output"])
```
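One aside on the tool-testing cell above: LangChain tools are Runnables too, so the `.run(...)` calls can equivalently use the `.invoke` interface the rest of this changeset standardizes on. A sketch, reusing the notebook's `query_tool` and live Cassandra session:

```python
# Equivalent to query_tool.run({"query": ...}); assumes the `query_tool`
# object and connected session from the cells above.
result = query_tool.invoke({"query": "SELECT * FROM iot_sensors LIMIT 5;"})
print(result)
```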
```diff
@@ -169,7 +169,7 @@
 
 def get_tools(query):
     # Get documents, which contain the Plugins to use
-    docs = retriever.get_relevant_documents(query)
+    docs = retriever.invoke(query)
     # Get the toolkits, one for each plugin
     tool_kits = [toolkits_dict[d.metadata["plugin_name"]] for d in docs]
     # Get the tools: a separate NLAChain for each endpoint
```
```diff
@@ -193,7 +193,7 @@
 
 def get_tools(query):
     # Get documents, which contain the Plugins to use
-    docs = retriever.get_relevant_documents(query)
+    docs = retriever.invoke(query)
     # Get the toolkits, one for each plugin
     tool_kits = [toolkits_dict[d.metadata["plugin_name"]] for d in docs]
     # Get the tools: a separate NLAChain for each endpoint
```
```diff
@@ -142,7 +142,7 @@
 
 
 def get_tools(query):
-    docs = retriever.get_relevant_documents(query)
+    docs = retriever.invoke(query)
     return [ALL_TOOLS[d.metadata["index"]] for d in docs]
```
```diff
@@ -362,7 +362,7 @@
 llm = OpenAI()
-llm(query)
+llm.invoke(query)
```
```diff
@@ -108,7 +108,7 @@
         return obs_message
 
     def _act(self):
-        act_message = self.model(self.message_history)
+        act_message = self.model.invoke(self.message_history)
         self.message_history.append(act_message)
         action = int(self.action_parser.parse(act_message.content)["action"])
         return action
```
```diff
@@ -206,7 +206,7 @@
     print("---RETRIEVE---")
     state_dict = state["keys"]
     question = state_dict["question"]
-    documents = retriever.get_relevant_documents(question)
+    documents = retriever.invoke(question)
     return {"keys": {"documents": documents, "question": question}}
```
```diff
@@ -213,7 +213,7 @@
     print("---RETRIEVE---")
     state_dict = state["keys"]
     question = state_dict["question"]
-    documents = retriever.get_relevant_documents(question)
+    documents = retriever.invoke(question)
     return {"keys": {"documents": documents, "question": question}}
```
```diff
@@ -435,7 +435,7 @@
     display(HTML(image_html))
 
 
-docs = retriever.get_relevant_documents("Woman with children", k=10)
+docs = retriever.invoke("Woman with children", k=10)
 for doc in docs:
     if is_base64(doc.page_content):
         plt_img_base64(doc.page_content)
```
```diff
@@ -443,7 +443,7 @@
 
 query = "Woman with children"
-docs = retriever.get_relevant_documents(query, k=10)
+docs = retriever.invoke(query, k=10)
 
 for doc in docs:
     if is_base64(doc.page_content):
```
```diff
@@ -74,7 +74,7 @@
     Applies the chatmodel to the message history
     and returns the message string
     """
-    message = self.model(
+    message = self.model.invoke(
         [
             self.system_message,
             HumanMessage(content="\n".join(self.message_history + [self.prefix])),
```
```diff
@@ -79,7 +79,7 @@
     Applies the chatmodel to the message history
     and returns the message string
     """
-    message = self.model(
+    message = self.model.invoke(
         [
             self.system_message,
             HumanMessage(content="\n".join(self.message_history + [self.prefix])),
@@ -234,7 +234,7 @@
         termination_clause=self.termination_clause if self.stop else "",
     )
 
-    self.response = self.model(
+    self.response = self.model.invoke(
         [
             self.system_message,
             HumanMessage(content=response_prompt),
@@ -263,7 +263,7 @@
         speaker_names=speaker_names,
     )
 
-    choice_string = self.model(
+    choice_string = self.model.invoke(
         [
             self.system_message,
             HumanMessage(content=choice_prompt),
@@ -299,7 +299,7 @@
         ),
         next_speaker=self.next_speaker,
     )
-    message = self.model(
+    message = self.model.invoke(
         [
             self.system_message,
             HumanMessage(content=next_prompt),
```
```diff
@@ -71,7 +71,7 @@
     Applies the chatmodel to the message history
     and returns the message string
     """
-    message = self.model(
+    message = self.model.invoke(
         [
             self.system_message,
             HumanMessage(content="\n".join(self.message_history + [self.prefix])),
@@ -164,7 +164,7 @@
         message_history="\n".join(self.message_history),
         recent_message=self.message_history[-1],
    )
-    bid_string = self.model([SystemMessage(content=prompt)]).content
+    bid_string = self.model.invoke([SystemMessage(content=prompt)]).content
     return bid_string
```
872
cookbook/oracleai_demo.ipynb
Normal file
872
cookbook/oracleai_demo.ipynb
Normal file
@@ -0,0 +1,872 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Oracle AI Vector Search with Document Processing\n",
|
||||
"Oracle AI Vector Search is designed for Artificial Intelligence (AI) workloads that allows you to query data based on semantics, rather than keywords.\n",
|
||||
"One of the biggest benefit of Oracle AI Vector Search is that semantic search on unstructured data can be combined with relational search on business data in one single system. This is not only powerful but also significantly more effective because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
|
||||
"\n",
|
||||
"In addition, because Oracle has been building database technologies for so long, your vectors can benefit from all of Oracle Database's most powerful features, like the following:\n",
|
||||
"\n",
|
||||
" * Partitioning Support\n",
|
||||
" * Real Application Clusters scalability\n",
|
||||
" * Exadata smart scans\n",
|
||||
" * Shard processing across geographically distributed databases\n",
|
||||
" * Transactions\n",
|
||||
" * Parallel SQL\n",
|
||||
" * Disaster recovery\n",
|
||||
" * Security\n",
|
||||
" * Oracle Machine Learning\n",
|
||||
" * Oracle Graph Database\n",
|
||||
" * Oracle Spatial and Graph\n",
|
||||
" * Oracle Blockchain\n",
|
||||
" * JSON\n",
|
||||
"\n",
|
||||
"This guide demonstrates how Oracle AI Vector Search can be used with Langchain to serve an end-to-end RAG pipeline. This guide goes through examples of:\n",
|
||||
"\n",
|
||||
" * Loading the documents from various sources using OracleDocLoader\n",
|
||||
" * Summarizing them within/outside the database using OracleSummary\n",
|
||||
" * Generating embeddings for them within/outside the database using OracleEmbeddings\n",
|
||||
" * Chunking them according to different requirements using Advanced Oracle Capabilities from OracleTextSplitter\n",
|
||||
" * Storing and Indexing them in a Vector Store and querying them for queries in OracleVS"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Prerequisites\n",
|
||||
"\n",
|
||||
"Please install Oracle Python Client driver to use Langchain with Oracle AI Vector Search. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# pip install oracledb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create Demo User\n",
|
||||
"First, create a demo user with all the required privileges. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Connection successful!\n",
|
||||
"User setup done!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# please update with your username, password, hostname and service_name\n",
|
||||
"# please make sure this user has sufficient privileges to perform all below\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
|
||||
" print(\"Connection successful!\")\n",
|
||||
"\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" begin\n",
|
||||
" -- drop user\n",
|
||||
" begin\n",
|
||||
" execute immediate 'drop user testuser cascade';\n",
|
||||
" exception\n",
|
||||
" when others then\n",
|
||||
" dbms_output.put_line('Error setting up user.');\n",
|
||||
" end;\n",
|
||||
" execute immediate 'create user testuser identified by testuser';\n",
|
||||
" execute immediate 'grant connect, unlimited tablespace, create credential, create procedure, create any index to testuser';\n",
|
||||
" execute immediate 'create or replace directory DEMO_PY_DIR as ''/scratch/hroy/view_storage/hroy_devstorage/demo/orachain''';\n",
|
||||
" execute immediate 'grant read, write on directory DEMO_PY_DIR to public';\n",
|
||||
" execute immediate 'grant create mining model to testuser';\n",
|
||||
"\n",
|
||||
" -- network access\n",
|
||||
" begin\n",
|
||||
" DBMS_NETWORK_ACL_ADMIN.APPEND_HOST_ACE(\n",
|
||||
" host => '*',\n",
|
||||
" ace => xs$ace_type(privilege_list => xs$name_list('connect'),\n",
|
||||
" principal_name => 'testuser',\n",
|
||||
" principal_type => xs_acl.ptype_db));\n",
|
||||
" end;\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" print(\"User setup done!\")\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"User setup failed!\")\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Process Documents using Oracle AI\n",
|
||||
"Let's think about a scenario that the users have some documents in Oracle Database or in a file system. They want to use the data for Oracle AI Vector Search using Langchain.\n",
|
||||
"\n",
|
||||
"For that, the users need to do some document preprocessing. The first step would be to read the documents, generate their summary(if needed) and then chunk/split them if needed. After that, they need to generate the embeddings for those chunks and store into Oracle AI Vector Store. Finally, the users will perform some semantic queries on those data. \n",
|
||||
"\n",
|
||||
"Oracle AI Vector Search Langchain library provides a range of document processing functionalities including document loading, splitting, generating summary and embeddings.\n",
|
||||
"\n",
|
||||
"In the following sections, we will go through how to use Oracle AI Langchain APIs to achieve each of these functionalities individually. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Connect to Demo User\n",
|
||||
"The following sample code will show how to connect to Oracle Database. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Connection successful!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"\n",
|
||||
"# please update with your username, password, hostname and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
|
||||
" print(\"Connection successful!\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"Connection failed!\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Populate a Demo Table\n",
|
||||
"Create a demo table and insert some sample documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Table created and populated.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"try:\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
"\n",
|
||||
" drop_table_sql = \"\"\"drop table demo_tab\"\"\"\n",
|
||||
" cursor.execute(drop_table_sql)\n",
|
||||
"\n",
|
||||
" create_table_sql = \"\"\"create table demo_tab (id number, data clob)\"\"\"\n",
|
||||
" cursor.execute(create_table_sql)\n",
|
||||
"\n",
|
||||
" insert_row_sql = \"\"\"insert into demo_tab values (:1, :2)\"\"\"\n",
|
||||
" rows_to_insert = [\n",
|
||||
" (\n",
|
||||
" 1,\n",
|
||||
" \"If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\",\n",
|
||||
" ),\n",
|
||||
" (\n",
|
||||
" 2,\n",
|
||||
" \"A tablespace can be online (accessible) or offline (not accessible) whenever the database is open.\\nA tablespace is usually online so that its data is available to users. The SYSTEM tablespace and temporary tablespaces cannot be taken offline.\",\n",
|
||||
" ),\n",
|
||||
" (\n",
|
||||
" 3,\n",
|
||||
" \"The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table.\\nSometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\",\n",
|
||||
" ),\n",
|
||||
" ]\n",
|
||||
" cursor.executemany(insert_row_sql, rows_to_insert)\n",
|
||||
"\n",
|
||||
" conn.commit()\n",
|
||||
"\n",
|
||||
" print(\"Table created and populated.\")\n",
|
||||
" cursor.close()\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"Table creation failed.\")\n",
|
||||
" cursor.close()\n",
|
||||
" conn.close()\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"Now that we have a demo user and a demo table with some data, we just need to do one more setup. For embedding and summary, we have a few provider options that the users can choose from such as database, 3rd party providers like ocigenai, huggingface, openai, etc. If the users choose to use 3rd party provider, they need to create a credential with corresponding authentication information. On the other hand, if the users choose to use 'database' as provider, they need to load an onnx model to Oracle Database for embeddings; however, for summary, they don't need to do anything."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load ONNX Model\n",
|
||||
"\n",
|
||||
"To generate embeddings, Oracle provides a few provider options for users to choose from. The users can choose 'database' provider or some 3rd party providers like OCIGENAI, HuggingFace, etc.\n",
|
||||
"\n",
|
||||
"***Note*** If the users choose database option, they need to load an ONNX model to Oracle Database. The users do not need to load an ONNX model to Oracle Database if they choose to use 3rd party provider to generate embeddings.\n",
|
||||
"\n",
|
||||
"One of the core benefits of using an ONNX model is that the users do not need to transfer their data to 3rd party to generate embeddings. And also, since it does not involve any network or REST API calls, it may provide better performance.\n",
|
||||
"\n",
|
||||
"Here is the sample code to load an ONNX model to Oracle Database:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ONNX model loaded.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
|
||||
"\n",
|
||||
"# please update with your related information\n",
|
||||
"# make sure that you have onnx file in the system\n",
|
||||
"onnx_dir = \"DEMO_PY_DIR\"\n",
|
||||
"onnx_file = \"tinybert.onnx\"\n",
|
||||
"model_name = \"demo_model\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" OracleEmbeddings.load_onnx_model(conn, onnx_dir, onnx_file, model_name)\n",
|
||||
" print(\"ONNX model loaded.\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"ONNX model loading failed!\")\n",
|
||||
" sys.exit(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create Credential\n",
|
||||
"\n",
|
||||
"On the other hand, if the users choose to use 3rd party provider to generate embeddings and summary, they need to create credential to access 3rd party provider's end points.\n",
|
||||
"\n",
|
||||
"***Note:*** The users do not need to create any credential if they choose to use 'database' provider to generate embeddings and summary. Should the users choose to 3rd party provider, they need to create credential for the 3rd party provider they want to use. \n",
|
||||
"\n",
|
||||
"Here is a sample example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"try:\n",
|
||||
" cursor = conn.cursor()\n",
|
||||
" cursor.execute(\n",
|
||||
" \"\"\"\n",
|
||||
" declare\n",
|
||||
" jo json_object_t;\n",
|
||||
" begin\n",
|
||||
" -- HuggingFace\n",
|
||||
" dbms_vector_chain.drop_credential(credential_name => 'HF_CRED');\n",
|
||||
" jo := json_object_t();\n",
|
||||
" jo.put('access_token', '<access_token>');\n",
|
||||
" dbms_vector_chain.create_credential(\n",
|
||||
" credential_name => 'HF_CRED',\n",
|
||||
" params => json(jo.to_string));\n",
|
||||
"\n",
|
||||
" -- OCIGENAI\n",
|
||||
" dbms_vector_chain.drop_credential(credential_name => 'OCI_CRED');\n",
|
||||
" jo := json_object_t();\n",
|
||||
" jo.put('user_ocid','<user_ocid>');\n",
|
||||
" jo.put('tenancy_ocid','<tenancy_ocid>');\n",
|
||||
" jo.put('compartment_ocid','<compartment_ocid>');\n",
|
||||
" jo.put('private_key','<private_key>');\n",
|
||||
" jo.put('fingerprint','<fingerprint>');\n",
|
||||
" dbms_vector_chain.create_credential(\n",
|
||||
" credential_name => 'OCI_CRED',\n",
|
||||
" params => json(jo.to_string));\n",
|
||||
" end;\n",
|
||||
" \"\"\"\n",
|
||||
" )\n",
|
||||
" cursor.close()\n",
|
||||
" print(\"Credentials created.\")\n",
|
||||
"except Exception as ex:\n",
|
||||
" cursor.close()\n",
|
||||
" raise"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load Documents\n",
|
||||
"The users can load the documents from Oracle Database or a file system or both. They just need to set the loader parameters accordingly. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
|
||||
"\n",
|
||||
"The main benefit of using OracleDocLoader is that it can handle 150+ different file formats. You don't need to use different types of loader for different file formats. Here is the list formats that we support: [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html)\n",
|
||||
"\n",
|
||||
"The following sample code will show how to do that:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of docs loaded: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.oracleai import OracleDocLoader\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# loading from Oracle Database table\n",
|
||||
"# make sure you have the table with this specification\n",
|
||||
"loader_params = {}\n",
|
||||
"loader_params = {\n",
|
||||
" \"owner\": \"testuser\",\n",
|
||||
" \"tablename\": \"demo_tab\",\n",
|
||||
" \"colname\": \"data\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"\"\"\" load the docs \"\"\"\n",
|
||||
"loader = OracleDocLoader(conn=conn, params=loader_params)\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of docs loaded: {len(docs)}\")\n",
|
||||
"# print(f\"Document-0: {docs[0].page_content}\") # content"
|
||||
]
|
||||
},
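{
"cell_type": "markdown",
"metadata": {},
"source": [
"Loading from a file system only requires different loader parameters. The following is a minimal sketch, assuming the `file`/`dir` parameter keys documented for OracleDocLoader; the paths are placeholders to fill in:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# loading from a local file (path is a placeholder)\n",
"file_loader_params = {\"file\": \"<file_path>\"}\n",
"# or from a local directory:\n",
"# file_loader_params = {\"dir\": \"<directory_path>\"}\n",
"\n",
"file_loader = OracleDocLoader(conn=conn, params=file_loader_params)\n",
"file_docs = file_loader.load()\n",
"print(f\"Number of docs loaded from file: {len(file_docs)}\")"
]
},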
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate Summary\n",
|
||||
"Now that the user loaded the documents, they may want to generate a summary for each document. The Oracle AI Vector Search Langchain library provides an API to do that. There are a few summary generation provider options including Database, OCIGENAI, HuggingFace and so on. The users can choose their preferred provider to generate a summary. Like before, they just need to set the summary parameters accordingly. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** The users may need to set proxy if they want to use some 3rd party summary generation providers other than Oracle's in-house and default provider: 'database'. If you don't have proxy, please remove the proxy parameter when you instantiate the OracleSummary."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# proxy to be used when we instantiate summary and embedder object\n",
|
||||
"proxy = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following sample code will show how to generate summary:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of Summaries: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.utilities.oracleai import OracleSummary\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# using 'database' provider\n",
|
||||
"summary_params = {\n",
|
||||
" \"provider\": \"database\",\n",
|
||||
" \"glevel\": \"S\",\n",
|
||||
" \"numParagraphs\": 1,\n",
|
||||
" \"language\": \"english\",\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# get the summary instance\n",
|
||||
"# Remove proxy if not required\n",
|
||||
"summ = OracleSummary(conn=conn, params=summary_params, proxy=proxy)\n",
|
||||
"\n",
|
||||
"list_summary = []\n",
|
||||
"for doc in docs:\n",
|
||||
" summary = summ.get_summary(doc.page_content)\n",
|
||||
" list_summary.append(summary)\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of Summaries: {len(list_summary)}\")\n",
|
||||
"# print(f\"Summary-0: {list_summary[0]}\") #content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Split Documents\n",
|
||||
"The documents can be in different sizes: small, medium, large, or very large. The users like to split/chunk their documents into smaller pieces to generate embeddings. There are lots of different splitting customizations the users can do. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
|
||||
"\n",
|
||||
"The following sample code will show how to do that:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of Chunks: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.oracleai import OracleTextSplitter\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# split by default parameters\n",
|
||||
"splitter_params = {\"normalize\": \"all\"}\n",
|
||||
"\n",
|
||||
"\"\"\" get the splitter instance \"\"\"\n",
|
||||
"splitter = OracleTextSplitter(conn=conn, params=splitter_params)\n",
|
||||
"\n",
|
||||
"list_chunks = []\n",
|
||||
"for doc in docs:\n",
|
||||
" chunks = splitter.split_text(doc.page_content)\n",
|
||||
" list_chunks.extend(chunks)\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of Chunks: {len(list_chunks)}\")\n",
|
||||
"# print(f\"Chunk-0: {list_chunks[0]}\") # content"
|
||||
]
|
||||
},
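{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a sketch of customization, the splitter parameters below follow the chunking options described in the Oracle AI Vector Search Guide; the specific parameter values here are illustrative assumptions, so please verify them against the guide:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative custom chunking: split by words, cap chunk size, add overlap\n",
"custom_splitter_params = {\"by\": \"words\", \"max\": \"100\", \"overlap\": \"10\", \"normalize\": \"all\"}\n",
"custom_splitter = OracleTextSplitter(conn=conn, params=custom_splitter_params)\n",
"custom_chunks = custom_splitter.split_text(docs[0].page_content)\n",
"print(f\"Number of custom chunks: {len(custom_chunks)}\")"
]
},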
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate Embeddings\n",
|
||||
"Now that the documents are chunked as per requirements, the users may want to generate embeddings for these chunks. Oracle AI Vector Search provides a number of ways to generate embeddings. The users can load an ONNX embedding model to Oracle Database and use it to generate embeddings or use some 3rd party API's end points to generate embeddings. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"***Note:*** The users may need to set proxy if they want to use some 3rd party embedding generation providers other than 'database' provider (aka using ONNX model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# proxy to be used when we instantiate summary and embedder object\n",
|
||||
"proxy = \"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following sample code will show how to generate embeddings:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Number of embeddings: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"# using ONNX model loaded to Oracle Database\n",
|
||||
"embedder_params = {\"provider\": \"database\", \"model\": \"demo_model\"}\n",
|
||||
"\n",
|
||||
"# get the embedding instance\n",
|
||||
"# Remove proxy if not required\n",
|
||||
"embedder = OracleEmbeddings(conn=conn, params=embedder_params, proxy=proxy)\n",
|
||||
"\n",
|
||||
"embeddings = []\n",
|
||||
"for doc in docs:\n",
|
||||
" chunks = splitter.split_text(doc.page_content)\n",
|
||||
" for chunk in chunks:\n",
|
||||
" embed = embedder.embed_query(chunk)\n",
|
||||
" embeddings.append(embed)\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of embeddings: {len(embeddings)}\")\n",
|
||||
"# print(f\"Embedding-0: {embeddings[0]}\") # content"
|
||||
]
|
||||
},
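{
"cell_type": "markdown",
"metadata": {},
"source": [
"For comparison, here is a hedged sketch of 3rd party embedder parameters. The endpoint URL and model name are placeholders, and 'OCI_CRED' must match the credential created earlier; this cell is not executed in this walkthrough:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative OCIGENAI provider parameters (placeholders to fill in)\n",
"ocigenai_embedder_params = {\n",
"    \"provider\": \"ocigenai\",\n",
"    \"credential_name\": \"OCI_CRED\",\n",
"    \"url\": \"<ocigenai_embedding_endpoint>\",\n",
"    \"model\": \"<embedding_model_name>\",\n",
"}\n",
"# embedder = OracleEmbeddings(conn=conn, params=ocigenai_embedder_params, proxy=proxy)"
]
},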
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create Oracle AI Vector Store\n",
|
||||
"Now that you know how to use Oracle AI Langchain library APIs individually to process the documents, let us show how to integrate with Oracle AI Vector Store to facilitate the semantic searches."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First, let's import all the dependencies."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 52,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"import oracledb\n",
|
||||
"from langchain_community.document_loaders.oracleai import (\n",
|
||||
" OracleDocLoader,\n",
|
||||
" OracleTextSplitter,\n",
|
||||
")\n",
|
||||
"from langchain_community.embeddings.oracleai import OracleEmbeddings\n",
|
||||
"from langchain_community.utilities.oracleai import OracleSummary\n",
|
||||
"from langchain_community.vectorstores import oraclevs\n",
|
||||
"from langchain_community.vectorstores.oraclevs import OracleVS\n",
|
||||
"from langchain_community.vectorstores.utils import DistanceStrategy\n",
|
||||
"from langchain_core.documents import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Next, let's combine all document processing stages together. Here is the sample code below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Connection successful!\n",
|
||||
"ONNX model loaded.\n",
|
||||
"Number of total chunks with metadata: 3\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\"\"\"\n",
|
||||
"In this sample example, we will use 'database' provider for both summary and embeddings.\n",
|
||||
"So, we don't need to do the followings:\n",
|
||||
" - set proxy for 3rd party providers\n",
|
||||
" - create credential for 3rd party providers\n",
|
||||
"\n",
|
||||
"If you choose to use 3rd party provider, \n",
|
||||
"please follow the necessary steps for proxy and credential.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"# oracle connection\n",
|
||||
"# please update with your username, password, hostname, and service_name\n",
|
||||
"username = \"\"\n",
|
||||
"password = \"\"\n",
|
||||
"dsn = \"\"\n",
|
||||
"\n",
|
||||
"try:\n",
|
||||
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
|
||||
" print(\"Connection successful!\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"Connection failed!\")\n",
|
||||
" sys.exit(1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# load onnx model\n",
|
||||
"# please update with your related information\n",
|
||||
"onnx_dir = \"DEMO_PY_DIR\"\n",
|
||||
"onnx_file = \"tinybert.onnx\"\n",
|
||||
"model_name = \"demo_model\"\n",
|
||||
"try:\n",
|
||||
" OracleEmbeddings.load_onnx_model(conn, onnx_dir, onnx_file, model_name)\n",
|
||||
" print(\"ONNX model loaded.\")\n",
|
||||
"except Exception as e:\n",
|
||||
" print(\"ONNX model loading failed!\")\n",
|
||||
" sys.exit(1)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# params\n",
|
||||
"# please update necessary fields with related information\n",
|
||||
"loader_params = {\n",
|
||||
" \"owner\": \"testuser\",\n",
|
||||
" \"tablename\": \"demo_tab\",\n",
|
||||
" \"colname\": \"data\",\n",
|
||||
"}\n",
|
||||
"summary_params = {\n",
|
||||
" \"provider\": \"database\",\n",
|
||||
" \"glevel\": \"S\",\n",
|
||||
" \"numParagraphs\": 1,\n",
|
||||
" \"language\": \"english\",\n",
|
||||
"}\n",
|
||||
"splitter_params = {\"normalize\": \"all\"}\n",
|
||||
"embedder_params = {\"provider\": \"database\", \"model\": \"demo_model\"}\n",
|
||||
"\n",
|
||||
"# instantiate loader, summary, splitter, and embedder\n",
|
||||
"loader = OracleDocLoader(conn=conn, params=loader_params)\n",
|
||||
"summary = OracleSummary(conn=conn, params=summary_params)\n",
|
||||
"splitter = OracleTextSplitter(conn=conn, params=splitter_params)\n",
|
||||
"embedder = OracleEmbeddings(conn=conn, params=embedder_params)\n",
|
||||
"\n",
|
||||
"# process the documents\n",
|
||||
"chunks_with_mdata = []\n",
|
||||
"for id, doc in enumerate(docs, start=1):\n",
|
||||
" summ = summary.get_summary(doc.page_content)\n",
|
||||
" chunks = splitter.split_text(doc.page_content)\n",
|
||||
" for ic, chunk in enumerate(chunks, start=1):\n",
|
||||
" chunk_metadata = doc.metadata.copy()\n",
|
||||
" chunk_metadata[\"id\"] = chunk_metadata[\"_oid\"] + \"$\" + str(id) + \"$\" + str(ic)\n",
|
||||
" chunk_metadata[\"document_id\"] = str(id)\n",
|
||||
" chunk_metadata[\"document_summary\"] = str(summ[0])\n",
|
||||
" chunks_with_mdata.append(\n",
|
||||
" Document(page_content=str(chunk), metadata=chunk_metadata)\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Number of total chunks with metadata: {len(chunks_with_mdata)}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"At this point, we have processed the documents and generated chunks with metadata. Next, we will create Oracle AI Vector Store with those chunks.\n",
|
||||
"\n",
|
||||
"Here is the sample code how to do that:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Vector Store Table: oravs\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# create Oracle AI Vector Store\n",
|
||||
"vectorstore = OracleVS.from_documents(\n",
|
||||
" chunks_with_mdata,\n",
|
||||
" embedder,\n",
|
||||
" client=conn,\n",
|
||||
" table_name=\"oravs\",\n",
|
||||
" distance_strategy=DistanceStrategy.DOT_PRODUCT,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\"\"\" verify \"\"\"\n",
|
||||
"print(f\"Vector Store Table: {vectorstore.table_name}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above example creates a vector store with DOT_PRODUCT distance strategy. \n",
|
||||
"\n",
|
||||
"However, the users can create Oracle AI Vector Store provides different distance strategies. Please see the [comprehensive guide](/docs/integrations/vectorstores/oracle) for more information."
|
||||
]
|
||||
},
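{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, a cosine-distance store can be created from the same chunks and embedder; this is a minimal sketch and the table name 'oravs_cosine' is a placeholder:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# same documents and embedder, different distance strategy\n",
"vectorstore_cosine = OracleVS.from_documents(\n",
"    chunks_with_mdata,\n",
"    embedder,\n",
"    client=conn,\n",
"    table_name=\"oravs_cosine\",\n",
"    distance_strategy=DistanceStrategy.COSINE,\n",
")"
]
},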
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now that we have embeddings stored in vector stores, let's create an index on them to get better semantic search performance during query time.\n",
|
||||
"\n",
|
||||
"***Note*** If you are getting some insufficient memory error, please increase ***vector_memory_size*** in your database.\n",
|
||||
"\n",
|
||||
"Here is the sample code to create an index:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"oraclevs.create_index(\n",
|
||||
" conn, vectorstore, params={\"idx_name\": \"hnsw_oravs\", \"idx_type\": \"HNSW\"}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(\"Index created.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The above example creates a default HNSW index on the embeddings stored in 'oravs' table. The users can set different parameters as per their requirements. Please refer to the Oracle AI Vector Search Guide book for complete information about these parameters.\n",
|
||||
"\n",
|
||||
"Also, there are different types of vector indices that the users can create. Please see the [comprehensive guide](/docs/integrations/vectorstores/oracle) for more information.\n"
|
||||
]
|
||||
},
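{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, an IVF index could be created as sketched below. The 'neighbor_part' and 'accuracy' parameter names follow the vector store guide and the values are illustrative; note also that a vector column typically carries one vector index, so you may need to drop 'hnsw_oravs' first:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative IVF index with a partition count and a target accuracy\n",
"oraclevs.create_index(\n",
"    conn,\n",
"    vectorstore,\n",
"    params={\"idx_name\": \"ivf_oravs\", \"idx_type\": \"IVF\", \"neighbor_part\": 64, \"accuracy\": 90},\n",
")"
]
},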
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Perform Semantic Search\n",
|
||||
"All set!\n",
|
||||
"\n",
|
||||
"We have processed the documents, stored them to vector store, and then created index to get better query performance. Now let's do some semantic searches.\n",
|
||||
"\n",
|
||||
"Here is the sample code for this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Document(page_content='The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table. Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.', metadata={'_oid': '662f2f257677f3c2311a8ff999fd34e5', '_rowid': 'AAAR/xAAEAAAAAnAAC', 'id': '662f2f257677f3c2311a8ff999fd34e5$3$1', 'document_id': '3', 'document_summary': 'Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\\n\\n'})]\n",
|
||||
"[]\n",
|
||||
"[(Document(page_content='The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table. Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.', metadata={'_oid': '662f2f257677f3c2311a8ff999fd34e5', '_rowid': 'AAAR/xAAEAAAAAnAAC', 'id': '662f2f257677f3c2311a8ff999fd34e5$3$1', 'document_id': '3', 'document_summary': 'Sometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\\n\\n'}), 0.055675752460956573)]\n",
|
||||
"[]\n",
|
||||
"[Document(page_content='If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.', metadata={'_oid': '662f2f253acf96b33b430b88699490a2', '_rowid': 'AAAR/xAAEAAAAAnAAA', 'id': '662f2f253acf96b33b430b88699490a2$1$1', 'document_id': '1', 'document_summary': 'If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\\n\\n'})]\n",
|
||||
"[Document(page_content='If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.', metadata={'_oid': '662f2f253acf96b33b430b88699490a2', '_rowid': 'AAAR/xAAEAAAAAnAAA', 'id': '662f2f253acf96b33b430b88699490a2$1$1', 'document_id': '1', 'document_summary': 'If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\\n\\n'})]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"query = \"What is Oracle AI Vector Store?\"\n",
|
||||
"filter = {\"document_id\": [\"1\"]}\n",
|
||||
"\n",
|
||||
"# Similarity search without a filter\n",
|
||||
"print(vectorstore.similarity_search(query, 1))\n",
|
||||
"\n",
|
||||
"# Similarity search with a filter\n",
|
||||
"print(vectorstore.similarity_search(query, 1, filter=filter))\n",
|
||||
"\n",
|
||||
"# Similarity search with relevance score\n",
|
||||
"print(vectorstore.similarity_search_with_score(query, 1))\n",
|
||||
"\n",
|
||||
"# Similarity search with relevance score with filter\n",
|
||||
"print(vectorstore.similarity_search_with_score(query, 1, filter=filter))\n",
|
||||
"\n",
|
||||
"# Max marginal relevance search\n",
|
||||
"print(vectorstore.max_marginal_relevance_search(query, 1, fetch_k=20, lambda_mult=0.5))\n",
|
||||
"\n",
|
||||
"# Max marginal relevance search with filter\n",
|
||||
"print(\n",
|
||||
" vectorstore.max_marginal_relevance_search(\n",
|
||||
" query, 1, fetch_k=20, lambda_mult=0.5, filter=filter\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
}
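,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a closing usage sketch (not part of the original walkthrough), the vector store can also be wrapped as a standard LangChain retriever using the base `VectorStore.as_retriever` and `Runnable.invoke` methods:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# expose the vector store as a retriever and run the same query\n",
"retriever = vectorstore.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 1})\n",
"print(retriever.invoke(query))"
]
}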
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -129,7 +129,7 @@
|
||||
" return obs_message\n",
|
||||
"\n",
|
||||
" def _act(self):\n",
|
||||
" act_message = self.model(self.message_history)\n",
|
||||
" act_message = self.model.invoke(self.message_history)\n",
|
||||
" self.message_history.append(act_message)\n",
|
||||
" action = int(self.action_parser.parse(act_message.content)[\"action\"])\n",
|
||||
" return action\n",
|
||||
|
||||
@@ -168,7 +168,7 @@
|
||||
"\n",
|
||||
"retriever = vector_store.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 3})\n",
|
||||
"\n",
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"<your question>\")\n",
|
||||
"retrieved_docs = retriever.invoke(\"<your question>\")\n",
|
||||
"\n",
|
||||
"print(retrieved_docs[0].page_content)\n",
|
||||
"\n",
|
||||
|
||||
@@ -0,0 +1,80 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# RAG using Upstage Layout Analysis and Groundedness Check\n",
|
||||
"This example illustrates RAG using [Upstage](https://python.langchain.com/docs/integrations/providers/upstage/) Layout Analysis and Groundedness Check."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.vectorstores import DocArrayInMemorySearch\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_core.runnables.base import RunnableSerializable\n",
|
||||
"from langchain_upstage import (\n",
|
||||
" ChatUpstage,\n",
|
||||
" UpstageEmbeddings,\n",
|
||||
" UpstageGroundednessCheck,\n",
|
||||
" UpstageLayoutAnalysisLoader,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model = ChatUpstage()\n",
|
||||
"\n",
|
||||
"files = [\"/PATH/TO/YOUR/FILE.pdf\", \"/PATH/TO/YOUR/FILE2.pdf\"]\n",
|
||||
"\n",
|
||||
"loader = UpstageLayoutAnalysisLoader(file_path=files, split=\"element\")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"\n",
|
||||
"vectorstore = DocArrayInMemorySearch.from_documents(docs, embedding=UpstageEmbeddings())\n",
|
||||
"retriever = vectorstore.as_retriever()\n",
|
||||
"\n",
|
||||
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||
"{context}\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||
"output_parser = StrOutputParser()\n",
|
||||
"\n",
|
||||
"retrieved_docs = retriever.get_relevant_documents(\"How many parameters in SOLAR model?\")\n",
|
||||
"\n",
|
||||
"groundedness_check = UpstageGroundednessCheck()\n",
|
||||
"groundedness = \"\"\n",
|
||||
"while groundedness != \"grounded\":\n",
|
||||
" chain: RunnableSerializable = RunnablePassthrough() | prompt | model | output_parser\n",
|
||||
"\n",
|
||||
" result = chain.invoke(\n",
|
||||
" {\n",
|
||||
" \"context\": retrieved_docs,\n",
|
||||
" \"question\": \"How many parameters in SOLAR model?\",\n",
|
||||
" }\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" groundedness = groundedness_check.invoke(\n",
|
||||
" {\n",
|
||||
" \"context\": retrieved_docs,\n",
|
||||
" \"answer\": result,\n",
|
||||
" }\n",
|
||||
" )"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -355,15 +355,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"attribute_info[-2][\n",
|
||||
" \"description\"\n",
|
||||
"] += f\". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}\"\n",
|
||||
"attribute_info[3][\n",
|
||||
" \"description\"\n",
|
||||
"] += f\". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}\"\n",
|
||||
"attribute_info[-3][\n",
|
||||
" \"description\"\n",
|
||||
"] += f\". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}\""
|
||||
"attribute_info[-2][\"description\"] += (\n",
|
||||
" f\". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}\"\n",
|
||||
")\n",
|
||||
"attribute_info[3][\"description\"] += (\n",
|
||||
" f\". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}\"\n",
|
||||
")\n",
|
||||
"attribute_info[-3][\"description\"] += (\n",
|
||||
" f\". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -688,9 +688,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"attribute_info[-3][\n",
|
||||
" \"description\"\n",
|
||||
"] += \". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter.\"\n",
|
||||
"attribute_info[-3][\"description\"] += (\n",
|
||||
" \". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter.\"\n",
|
||||
")\n",
|
||||
"chain = load_query_constructor_runnable(\n",
|
||||
" ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
|
||||
" doc_contents,\n",
|
||||
@@ -1227,7 +1227,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"results = retriever.get_relevant_documents(\n",
|
||||
"results = retriever.invoke(\n",
|
||||
" \"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace.\"\n",
|
||||
")\n",
|
||||
"for res in results:\n",
|
||||
|
||||
@@ -84,7 +84,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
" Applies the chatmodel to the message history\n",
|
||||
" and returns the message string\n",
|
||||
" \"\"\"\n",
|
||||
" message = self.model(\n",
|
||||
" message = self.model.invoke(\n",
|
||||
" [\n",
|
||||
" self.system_message,\n",
|
||||
" HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n",
|
||||
|
||||
docs/.gitignore (vendored)
@@ -1,2 +1,3 @@
|
||||
/.quarto/
|
||||
src/supabase.d.ts
|
||||
build
|
||||
@@ -1,24 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -o xtrace
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")"; pwd)"
|
||||
cd "${SCRIPT_DIR}"
|
||||
|
||||
mkdir -p ../_dist
|
||||
rsync -ruv --exclude node_modules --exclude api_reference --exclude .venv --exclude .docusaurus . ../_dist
|
||||
cd ../_dist
|
||||
poetry run python scripts/model_feat_table.py
|
||||
cp ../cookbook/README.md src/pages/cookbook.mdx
|
||||
mkdir -p docs/templates
|
||||
cp ../templates/docs/INDEX.md docs/templates/index.md
|
||||
poetry run python scripts/copy_templates.py
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O docs/langserve.md
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langgraph/main/README.md -O docs/langgraph.md
|
||||
|
||||
yarn
|
||||
|
||||
poetry run quarto preview docs
|
||||
docs/Makefile (new file)
@@ -0,0 +1,82 @@
|
||||
# we build the docs in these stages:
|
||||
# 1. install vercel and python dependencies
|
||||
# 2. copy files from "source dir" to "intermediate dir"
|
||||
# 3. generate files like model feat table, etc in "intermediate dir"
|
||||
# 4. copy files to their right spots (e.g. langserve readme) in "intermediate dir"
|
||||
# 5. build the docs from "intermediate dir" to "output dir"
|
||||
|
||||
SOURCE_DIR = docs/
|
||||
INTERMEDIATE_DIR = build/intermediate/docs
|
||||
|
||||
OUTPUT_NEW_DIR = build/output-new
|
||||
OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
|
||||
|
||||
PYTHON = .venv/bin/python
|
||||
|
||||
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm|ai21" | tr '\n' ' ')
|
||||
|
||||
PORT ?= 3001
|
||||
|
||||
clean:
|
||||
rm -rf build
|
||||
|
||||
install-vercel-deps:
|
||||
yum -y update
|
||||
yum install gcc bzip2-devel libffi-devel zlib-devel wget tar gzip rsync -y
|
||||
|
||||
install-py-deps:
|
||||
python3 -m venv .venv
|
||||
$(PYTHON) -m pip install --upgrade pip
|
||||
$(PYTHON) -m pip install --upgrade uv
|
||||
$(PYTHON) -m uv pip install -r vercel_requirements.txt
|
||||
$(PYTHON) -m uv pip install --editable $(PARTNER_DEPS_LIST)
|
||||
|
||||
generate-files:
|
||||
mkdir -p $(INTERMEDIATE_DIR)
|
||||
cp -r $(SOURCE_DIR)/* $(INTERMEDIATE_DIR)
|
||||
mkdir -p $(INTERMEDIATE_DIR)/templates
|
||||
cp ../templates/docs/INDEX.md $(INTERMEDIATE_DIR)/templates/index.md
|
||||
cp ../cookbook/README.md $(INTERMEDIATE_DIR)/cookbook.mdx
|
||||
|
||||
$(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR)
|
||||
|
||||
$(PYTHON) scripts/copy_templates.py $(INTERMEDIATE_DIR)
|
||||
|
||||
wget -q https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O $(INTERMEDIATE_DIR)/langserve.md
|
||||
$(PYTHON) scripts/resolve_local_links.py $(INTERMEDIATE_DIR)/langserve.md https://github.com/langchain-ai/langserve/tree/main/
|
||||
|
||||
copy-infra:
|
||||
mkdir -p $(OUTPUT_NEW_DIR)
|
||||
cp -r src $(OUTPUT_NEW_DIR)
|
||||
cp vercel.json $(OUTPUT_NEW_DIR)
|
||||
cp babel.config.js $(OUTPUT_NEW_DIR)
|
||||
cp -r data $(OUTPUT_NEW_DIR)
|
||||
cp docusaurus.config.js $(OUTPUT_NEW_DIR)
|
||||
cp package.json $(OUTPUT_NEW_DIR)
|
||||
cp sidebars.js $(OUTPUT_NEW_DIR)
|
||||
cp -r static $(OUTPUT_NEW_DIR)
|
||||
cp yarn.lock $(OUTPUT_NEW_DIR)
|
||||
|
||||
render:
|
||||
$(PYTHON) scripts/notebook_convert.py $(INTERMEDIATE_DIR) $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
md-sync:
|
||||
rsync -avm --include="*/" --include="*.mdx" --include="*.md" --include="*.png" --exclude="*" $(INTERMEDIATE_DIR)/ $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
generate-references:
|
||||
$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
|
||||
|
||||
build: install-py-deps generate-files copy-infra render md-sync generate-references
|
||||
|
||||
vercel-build: install-vercel-deps build
|
||||
rm -rf docs
|
||||
mv $(OUTPUT_NEW_DOCS_DIR) docs
|
||||
rm -rf build
|
||||
yarn run docusaurus build
|
||||
mv build v0.1
|
||||
mkdir build
|
||||
mv v0.1 build
|
||||
mv build/v0.1/404.html build
|
||||
|
||||
start:
|
||||
cd $(OUTPUT_NEW_DIR) && yarn && yarn start --port=$(PORT)
|
||||
File diff suppressed because one or more lines are too long
@@ -1,76 +0,0 @@
|
||||
/* eslint-disable prefer-template */
|
||||
/* eslint-disable no-param-reassign */
|
||||
// eslint-disable-next-line import/no-extraneous-dependencies
|
||||
const babel = require("@babel/core");
|
||||
const path = require("path");
|
||||
const fs = require("fs");
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {string|Buffer} content Content of the resource file
|
||||
* @param {object} [map] SourceMap data consumable by https://github.com/mozilla/source-map
|
||||
* @param {any} [meta] Meta data, could be anything
|
||||
*/
|
||||
async function webpackLoader(content, map, meta) {
|
||||
const cb = this.async();
|
||||
|
||||
if (!this.resourcePath.endsWith(".ts")) {
|
||||
cb(null, JSON.stringify({ content, imports: [] }), map, meta);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await babel.parseAsync(content, {
|
||||
sourceType: "module",
|
||||
filename: this.resourcePath,
|
||||
});
|
||||
|
||||
const imports = [];
|
||||
|
||||
result.program.body.forEach((node) => {
|
||||
if (node.type === "ImportDeclaration") {
|
||||
const source = node.source.value;
|
||||
|
||||
if (!source.startsWith("langchain")) {
|
||||
return;
|
||||
}
|
||||
|
||||
node.specifiers.forEach((specifier) => {
|
||||
if (specifier.type === "ImportSpecifier") {
|
||||
const local = specifier.local.name;
|
||||
const imported = specifier.imported.name;
|
||||
imports.push({ local, imported, source });
|
||||
} else {
|
||||
throw new Error("Unsupported import type");
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
imports.forEach((imp) => {
|
||||
const { imported, source } = imp;
|
||||
const moduleName = source.split("/").slice(1).join("_");
|
||||
const docsPath = path.resolve(__dirname, "docs", "api", moduleName);
|
||||
const available = fs.readdirSync(docsPath, { withFileTypes: true });
|
||||
const found = available.find(
|
||||
(dirent) =>
|
||||
dirent.isDirectory() &&
|
||||
fs.existsSync(path.resolve(docsPath, dirent.name, imported + ".md"))
|
||||
);
|
||||
if (found) {
|
||||
imp.docs =
|
||||
"/" + path.join("docs", "api", moduleName, found.name, imported);
|
||||
} else {
|
||||
throw new Error(
|
||||
`Could not find docs for ${source}.${imported} in docs/api/`
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
cb(null, JSON.stringify({ content, imports }), map, meta);
|
||||
} catch (err) {
|
||||
cb(err);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = webpackLoader;
|
||||
docs/data/people.yml
File diff suppressed because it is too large
@@ -7,9 +7,9 @@
|
||||
"source": [
|
||||
"# Create a runnable with the @chain decorator\n",
|
||||
"\n",
|
||||
"You can also turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionaly equivalent to wrapping in a [`RunnableLambda`](/docs/expression_language/primitives/functions).\n",
|
||||
"You can also turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionally equivalent to wrapping in a [`RunnableLambda`](/docs/expression_language/primitives/functions).\n",
|
||||
"\n",
|
||||
"This will have the benefit of improved observability by tracing your chain correctly. Any calls to runnables inside this function will be traced as nested childen.\n",
|
||||
"This will have the benefit of improved observability by tracing your chain correctly. Any calls to runnables inside this function will be traced as nested children.\n",
|
||||
"\n",
|
||||
"It will also allow you to use this as any other runnable, compose it in chain, etc.\n",
|
||||
"\n",
|
||||
|
||||
@@ -4,6 +4,7 @@ sidebar_class_name: hidden
|
||||
|
||||
# LangChain Expression Language (LCEL)
|
||||
|
||||
|
||||
LangChain Expression Language, or LCEL, is a declarative way to easily compose chains together.
|
||||
LCEL was designed from day 1 to **support putting prototypes in production, with no code changes**, from the simplest “prompt + LLM” chain to the most complex chains (we’ve seen folks successfully run LCEL chains with 100s of steps in production). To highlight a few of the reasons you might want to use LCEL:
|
||||
|
||||
@@ -11,7 +12,7 @@ LCEL was designed from day 1 to **support putting prototypes in production, with
|
||||
When you build your chains with LCEL you get the best possible time-to-first-token (time elapsed until the first chunk of output comes out). For some chains this means eg. we stream tokens straight from an LLM to a streaming output parser, and you get back parsed, incremental chunks of output at the same rate as the LLM provider outputs the raw tokens.
|
||||
|
||||
[**Async support**](/docs/expression_language/interface)
|
||||
Any chain built with LCEL can be called both with the synchronous API (eg. in your Jupyter notebook while prototyping) as well as with the asynchronous API (eg. in a [LangServe](/docs/langsmith) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server.
|
||||
Any chain built with LCEL can be called both with the synchronous API (eg. in your Jupyter notebook while prototyping) as well as with the asynchronous API (eg. in a [LangServe](/docs/langserve) server). This enables using the same code for prototypes and in production, with great performance, and the ability to handle many concurrent requests in the same server.
|
||||
|
||||
[**Optimized parallel execution**](/docs/expression_language/primitives/parallel)
|
||||
Whenever your LCEL chains have steps that can be executed in parallel (eg if you fetch documents from multiple retrievers) we automatically do it, both in the sync and the async interfaces, for the smallest possible latency.
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
"id": "9a9acd2e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
|
||||
"To make it as easy as possible to create custom chains, we've implemented a [\"Runnable\"](https://api.python.langchain.com/en/stable/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about [in this section](/docs/expression_language/primitives).\n",
|
||||
"\n",
|
||||
"This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way. \n",
|
||||
|
||||
@@ -3,16 +3,18 @@
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"id": "bc346658-6820-413a-bd8f-11bd3082fe43",
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"vscode": {
|
||||
"languageId": "raw"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_position: 0.5\n",
|
||||
"title: Advantages of LCEL\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
|
||||
"```"
|
||||
"import { ColumnContainer, Column } from \"@theme/Columns\";"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -20,6 +22,7 @@
|
||||
"id": "919a5ae2-ed21-4923-b98f-723c111bac67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
":::{.callout-tip} \n",
|
||||
"We recommend reading the LCEL [Get started](/docs/expression_language/get_started) section first.\n",
|
||||
":::"
|
||||
@@ -56,13 +59,10 @@
|
||||
"## Invoke\n",
|
||||
"In the simplest case, we just want to pass in a topic string and get back a joke string:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"<ColumnContainer>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### Without LCEL\n"
|
||||
]
|
||||
},
|
||||
@@ -102,11 +102,9 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
@@ -146,18 +144,15 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"</Column>\n",
|
||||
"</ColumnContainer>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Stream\n",
|
||||
"If we want to stream results instead, we'll need to change our function:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"\n",
|
||||
"<ColumnContainer>\n",
|
||||
"<Column>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### Without LCEL\n",
|
||||
"\n"
|
||||
@@ -198,11 +193,10 @@
|
||||
"id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
]
|
||||
@@ -223,19 +217,18 @@
|
||||
"id": "b9b41e78-ddeb-44d0-a58b-a0ea0c99a761",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
"</Column>\n",
|
||||
"</ColumnContainer>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Batch\n",
|
||||
"\n",
|
||||
"If we want to run on a batch of inputs in parallel, we'll again need a new function:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"\n",
|
||||
"<ColumnContainer>\n",
|
||||
"<Column>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#### Without LCEL\n",
|
||||
"\n"
|
||||
@@ -263,11 +256,11 @@
|
||||
"id": "9b3e9d34-6775-43c1-93d8-684b58e341ab",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
"\n",
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
]
|
||||
@@ -287,18 +280,14 @@
|
||||
"id": "cc5ba36f-eec1-4fc1-8cfe-fa242a7f7809",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
"</Column>\n",
|
||||
"</ColumnContainer>\n",
|
||||
"```\n",
|
||||
"## Async\n",
|
||||
"\n",
|
||||
"If we need an asynchronous version:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"<ColumnContainer>\n",
|
||||
"<Column>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### Without LCEL\n",
|
||||
"\n"
|
||||
@@ -334,11 +323,9 @@
|
||||
"id": "2f209290-498c-4c17-839e-ee9002919846",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
"</Column>\n",
|
||||
"\n",
|
||||
"<Column>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### LCEL\n",
|
||||
"\n"
|
||||
@@ -359,10 +346,9 @@
|
||||
"id": "1f282129-99a3-40f4-b67f-2d0718b1bea9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"```{=mdx}\n",
|
||||
"</Column>\n",
|
||||
"</ColumnContainer>\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"## Async Batch\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
|
||||
@@ -4,6 +4,7 @@ sidebar_position: 2
|
||||
|
||||
# Installation
|
||||
|
||||
|
||||
## Official release
|
||||
|
||||
To install LangChain run:
|
||||
|
||||
@@ -5,6 +5,7 @@ sidebar_class_name: hidden
|
||||
|
||||
# Introduction
|
||||
|
||||
|
||||
**LangChain** is a framework for developing applications powered by large language models (LLMs).
|
||||
|
||||
LangChain simplifies every stage of the LLM application lifecycle:
|
||||
@@ -13,12 +14,13 @@ LangChain simplifies every stage of the LLM application lifecycle:
|
||||
- **Deployment**: Turn any chain into an API with [LangServe](/docs/langserve).
|
||||
|
||||
import ThemedImage from '@theme/ThemedImage';
|
||||
import useBaseUrl from '@docusaurus/useBaseUrl';
|
||||
|
||||
<ThemedImage
|
||||
alt="Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers."
|
||||
sources={{
|
||||
light: '/svg/langchain_stack.svg',
|
||||
dark: '/svg/langchain_stack_dark.svg',
|
||||
light: useBaseUrl('/svg/langchain_stack.svg'),
|
||||
dark: useBaseUrl('/svg/langchain_stack_dark.svg'),
|
||||
}}
|
||||
title="LangChain Framework Overview"
|
||||
/>
|
||||
@@ -29,7 +31,7 @@ Concretely, the framework consists of the following open-source libraries:
|
||||
- **`langchain-community`**: Third party integrations.
|
||||
- Partner packages (e.g. **`langchain-openai`**, **`langchain-anthropic`**, etc.): Some integrations have been further split into their own lightweight packages that only depend on **`langchain-core`**.
|
||||
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
|
||||
- **[langgraph](/docs/langgraph)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
- **[langgraph](https://langchain-ai.github.io/langgraph/)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
|
||||
- **[langserve](/docs/langserve)**: Deploy LangChain chains as REST APIs.
|
||||
|
||||
The broader ecosystem includes:
|
||||
@@ -73,7 +75,7 @@ LangChain Expression Language (LCEL) is the foundation of many of LangChain's co
|
||||
### [🦜🛠️ LangSmith](/docs/langsmith)
|
||||
Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production.
|
||||
|
||||
### [🦜🕸️ LangGraph](/docs/langgraph)
|
||||
### [🦜🕸️ LangGraph](https://langchain-ai.github.io/langgraph/)
|
||||
Build stateful, multi-actor applications with LLMs, built on top of (and intended to be used with) LangChain primitives.
|
||||
|
||||
### [🦜🏓 LangServe](/docs/langserve)
|
||||
|
||||
@@ -4,6 +4,7 @@ sidebar_position: 1
|
||||
|
||||
# Quickstart
|
||||
|
||||
|
||||
In this quickstart we'll show you how to:
|
||||
- Get setup with LangChain, LangSmith and LangServe
|
||||
- Use the most basic and common components of LangChain: prompt templates, models, and output parsers
|
||||
@@ -194,7 +195,7 @@ Prompt templates convert raw user input to better input to the LLM.
|
||||
```python
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
prompt = ChatPromptTemplate.from_messages([
|
||||
("system", "You are world class technical documentation writer."),
|
||||
("system", "You are a world class technical documentation writer."),
|
||||
("user", "{input}")
|
||||
])
|
||||
```
|
||||
|
||||
@@ -17,7 +17,7 @@ Here's a summary of the key methods and properties of a comparison evaluator:
|
||||
- `requires_reference`: This property specifies whether this evaluator requires a reference label.
|
||||
|
||||
:::note LangSmith Support
|
||||
The [run_on_dataset](https://api.python.langchain.com/en/latest/langchain_api_reference.html#module-langchain.smith) evaluation method is designed to evaluate only a single model at a time, and thus, doesn't support these evaluators.
|
||||
Pairwise evaluations are supported in LangSmith via the [`evaluate_comparative`](https://docs.smith.langchain.com/how_to_guides/evaluation/evaluate_pairwise) function.
|
||||
:::
|
||||
|
||||
Detailed information about creating custom evaluators and the available built-in comparison evaluators is provided in the following sections.
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"\n",
|
||||
"This notebook shows how to prevent prompt injection attacks using the text classification model from `HuggingFace`.\n",
|
||||
"\n",
|
||||
"By default, it uses a *[laiyer/deberta-v3-base-prompt-injection](https://huggingface.co/laiyer/deberta-v3-base-prompt-injection)* model trained to identify prompt injections. \n",
|
||||
"By default, it uses a *[protectai/deberta-v3-base-prompt-injection-v2](https://huggingface.co/protectai/deberta-v3-base-prompt-injection-v2)* model trained to identify prompt injections. \n",
|
||||
"\n",
|
||||
"In this notebook, we will use the ONNX version of the model to speed up the inference. "
|
||||
]
|
||||
@@ -49,11 +49,15 @@
|
||||
"from optimum.onnxruntime import ORTModelForSequenceClassification\n",
|
||||
"from transformers import AutoTokenizer, pipeline\n",
|
||||
"\n",
|
||||
"# Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection\n",
|
||||
"model_path = \"laiyer/deberta-v3-base-prompt-injection\"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_path)\n",
|
||||
"tokenizer.model_input_names = [\"input_ids\", \"attention_mask\"] # Hack to run the model\n",
|
||||
"model = ORTModelForSequenceClassification.from_pretrained(model_path, subfolder=\"onnx\")\n",
|
||||
"# Using https://huggingface.co/protectai/deberta-v3-base-prompt-injection-v2\n",
|
||||
"model_path = \"protectai/deberta-v3-base-prompt-injection-v2\"\n",
|
||||
"revision = None # We recommend specifiying the revision to avoid breaking changes or supply chain attacks\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(\n",
|
||||
" model_path, revision=revision, model_input_names=[\"input_ids\", \"attention_mask\"]\n",
|
||||
")\n",
|
||||
"model = ORTModelForSequenceClassification.from_pretrained(\n",
|
||||
" model_path, revision=revision, subfolder=\"onnx\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"classifier = pipeline(\n",
|
||||
" \"text-classification\",\n",
|
||||
|
||||
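A minimal sketch of wiring this classifier into LangChain follows. It assumes the `classifier` pipeline built above; `HuggingFaceInjectionIdentifier` lives in `langchain_experimental` and raises a `ValueError` when it flags an input:

```python
# Minimal sketch: wrap the ONNX classifier as a LangChain injection-detection tool.
from langchain_experimental.prompt_injection_identifier import HuggingFaceInjectionIdentifier

injection_identifier = HuggingFaceInjectionIdentifier(model=classifier)
injection_identifier.run("Name 5 cities with the biggest number of inhabitants")  # benign input passes through
```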
@@ -194,7 +194,7 @@
|
||||
"llm = OpenAI(\n",
|
||||
" temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n",
|
||||
")\n",
|
||||
"print(llm(\"Tell me a joke\"))"
|
||||
"print(llm.invoke(\"Tell me a joke\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -270,7 +270,7 @@
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"llm_results = chat_llm(\n",
|
||||
"llm_results = chat_llm.invoke(\n",
|
||||
" [\n",
|
||||
" SystemMessage(content=\"Always use a lot of emojis\"),\n",
|
||||
" HumanMessage(content=\"Tell me a joke\"),\n",
|
||||
|
||||
@@ -107,7 +107,7 @@ User tracking allows you to identify your users, track their cost, conversations
|
||||
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler, identify
|
||||
|
||||
with identify("user-123"):
|
||||
llm("Tell me a joke")
|
||||
llm.invoke("Tell me a joke")
|
||||
|
||||
with identify("user-456", user_props={"email": "user456@test.com"}):
|
||||
agen.run("Who is Leo DiCaprio's girlfriend?")
|
||||
|
||||
@@ -103,7 +103,7 @@
|
||||
" temperature=0,\n",
|
||||
" callbacks=[PromptLayerCallbackHandler(pl_tags=[\"chatopenai\"])],\n",
|
||||
")\n",
|
||||
"llm_results = chat_llm(\n",
|
||||
"llm_results = chat_llm.invoke(\n",
|
||||
" [\n",
|
||||
" HumanMessage(content=\"What comes after 1,2,3 ?\"),\n",
|
||||
" HumanMessage(content=\"Tell me another joke?\"),\n",
|
||||
@@ -129,10 +129,11 @@
|
||||
"from langchain_community.llms import GPT4All\n",
|
||||
"\n",
|
||||
"model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n",
|
||||
"callbacks = [PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])]\n",
|
||||
"\n",
|
||||
"response = model(\n",
|
||||
"response = model.invoke(\n",
|
||||
" \"Once upon a time, \",\n",
|
||||
" callbacks=[PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])],\n",
|
||||
" config={\"callbacks\": callbacks},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -181,7 +182,7 @@
|
||||
")\n",
|
||||
"\n",
|
||||
"example_prompt = promptlayer.prompts.get(\"example\", version=1, langchain=True)\n",
|
||||
"openai_llm(example_prompt.format(product=\"toasters\"))"
|
||||
"openai_llm.invoke(example_prompt.format(product=\"toasters\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -315,7 +315,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_res = chat_llm(\n",
|
||||
"chat_res = chat_llm.invoke(\n",
|
||||
" [\n",
|
||||
" SystemMessage(content=\"Every answer of yours must be about OpenAI.\"),\n",
|
||||
" HumanMessage(content=\"Tell me a joke\"),\n",
|
||||
|
||||
@@ -72,7 +72,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"output = chat([HumanMessage(content=\"write a funny joke\")])\n",
|
||||
"output = chat.invoke([HumanMessage(content=\"write a funny joke\")])\n",
|
||||
"print(\"output:\", output)"
|
||||
]
|
||||
},
|
||||
@@ -90,7 +90,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"kwargs = {\"temperature\": 0.8, \"top_p\": 0.8, \"top_k\": 5}\n",
|
||||
"output = chat([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
|
||||
"output = chat.invoke([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
|
||||
"print(\"output:\", output)"
|
||||
]
|
||||
},
|
||||
|
||||
File diff suppressed because one or more lines are too long

docs/docs/integrations/chat/coze.ipynb (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Coze Chat\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Chat with Coze Bot\n",
|
||||
"\n",
|
||||
"ChatCoze chat models API by coze.com. For more information, see [https://www.coze.com/open/docs/chat](https://www.coze.com/open/docs/chat)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-25T15:14:24.186131Z",
|
||||
"start_time": "2024-04-25T15:14:23.831767Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatCoze\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-25T15:14:24.191123Z",
|
||||
"start_time": "2024-04-25T15:14:24.186330Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatCoze(\n",
|
||||
" coze_api_base=\"YOUR_API_BASE\",\n",
|
||||
" coze_api_key=\"YOUR_API_KEY\",\n",
|
||||
" bot_id=\"YOUR_BOT_ID\",\n",
|
||||
" user=\"YOUR_USER_ID\",\n",
|
||||
" conversation_id=\"YOUR_CONVERSATION_ID\",\n",
|
||||
" streaming=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, you can set your API key and API base with:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"COZE_API_KEY\"] = \"YOUR_API_KEY\"\n",
|
||||
"os.environ[\"COZE_API_BASE\"] = \"YOUR_API_BASE\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-25T15:14:25.853218Z",
|
||||
"start_time": "2024-04-25T15:14:24.192408Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='为你找到关于coze的信息如下:\n\nCoze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。\n\n用户无论是否有编程经验,都可以通过该平台快速创建各种类型的聊天机器人、智能体、AI应用和插件,并将其部署在社交平台和即时聊天应用程序中。\n\n国际版使用的模型比国内版更强大。')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([HumanMessage(content=\"什么是扣子(coze)\")])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"## Chat with Coze Streaming"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-25T15:14:25.870044Z",
|
||||
"start_time": "2024-04-25T15:14:25.863381Z"
|
||||
},
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatCoze(\n",
|
||||
" coze_api_base=\"YOUR_API_BASE\",\n",
|
||||
" coze_api_key=\"YOUR_API_KEY\",\n",
|
||||
" bot_id=\"YOUR_BOT_ID\",\n",
|
||||
" user=\"YOUR_USER_ID\",\n",
|
||||
" conversation_id=\"YOUR_CONVERSATION_ID\",\n",
|
||||
" streaming=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-25T15:14:27.153546Z",
|
||||
"start_time": "2024-04-25T15:14:25.868470Z"
|
||||
},
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessageChunk(content='为你查询到Coze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。')"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat([HumanMessage(content=\"什么是扣子(coze)\")])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -8,13 +8,8 @@
|
||||
"source": [
|
||||
"# DeepInfra\n",
|
||||
"\n",
|
||||
"[DeepInfra](https://deepinfra.com/?utm_source=langchain) is a serverless inference as a service that provides access to a [variety of LLMs](https://deepinfra.com/models?utm_source=langchain) and [embeddings models](https://deepinfra.com/models?type=embeddings&utm_source=langchain). This notebook goes over how to use LangChain with DeepInfra for chat models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"[DeepInfra](https://deepinfra.com/?utm_source=langchain) is a serverless inference as a service that provides access to a [variety of LLMs](https://deepinfra.com/models?utm_source=langchain) and [embeddings models](https://deepinfra.com/models?type=embeddings&utm_source=langchain). This notebook goes over how to use LangChain with DeepInfra for chat models.\n",
|
||||
"\n",
|
||||
"## Set the Environment API Key\n",
|
||||
"Make sure to get your API key from DeepInfra. You have to [Login](https://deepinfra.com/login?from=%2Fdash) and get a new token.\n",
|
||||
"\n",
|
||||
@@ -24,92 +19,34 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# get a new token: https://deepinfra.com/login?from=%2Fdash\n",
|
||||
"\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"DEEPINFRA_API_TOKEN = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# or pass deepinfra_api_token parameter to the ChatDeepInfra constructor\n",
|
||||
"os.environ[\"DEEPINFRA_API_TOKEN\"] = DEEPINFRA_API_TOKEN"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# get a new token: https://deepinfra.com/login?from=%2Fdash\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_models import ChatDeepInfra\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatDeepInfra(model=\"meta-llama/Llama-2-7b-chat-hf\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" J'aime la programmation.\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"DEEPINFRA_API_TOKEN = getpass()\n",
|
||||
"\n",
|
||||
"# or pass deepinfra_api_token parameter to the ChatDeepInfra constructor\n",
|
||||
"os.environ[\"DEEPINFRA_API_TOKEN\"] = DEEPINFRA_API_TOKEN\n",
|
||||
"\n",
|
||||
"chat = ChatDeepInfra(model=\"meta-llama/Llama-2-7b-chat-hf\")\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Translate this sentence from English to French. I love programming.\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"chat(messages)"
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -123,7 +60,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": null,
|
||||
"id": "93a21c5c-6ef9-4688-be60-b2e1f94842fb",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
@@ -135,69 +72,32 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": null,
|
||||
"id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"LLMResult(generations=[[ChatGeneration(text=\" J'aime programmer.\", generation_info=None, message=AIMessage(content=\" J'aime programmer.\", additional_kwargs={}, example=False))]], llm_output={}, run=[RunInfo(run_id=UUID('8cc8fb68-1c35-439c-96a0-695036a93652'))])"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"await chat.agenerate([messages])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": null,
|
||||
"id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" J'aime la programmation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" J'aime la programmation.\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = ChatDeepInfra(\n",
|
||||
" streaming=True,\n",
|
||||
" verbose=True,\n",
|
||||
" callbacks=[StreamingStdOutCallbackHandler()],\n",
|
||||
")\n",
|
||||
"chat(messages)"
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c253883f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
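Since the rest of this page migrates to the runnable interface, an equivalent callback-free sketch uses `.stream()` directly (assuming the `chat` and `messages` objects defined above):

```python
# Minimal sketch: token-by-token streaming without a callback handler.
for chunk in chat.stream(messages):
    print(chunk.content, end="", flush=True)
```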
@@ -114,7 +114,7 @@
|
||||
"human = \"Translate this sentence from English to French. I love programming.\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI(model_name=\"gemini-pro\", convert_system_message_to_human=True)\n",
|
||||
"chat = ChatVertexAI(model=\"gemini-pro\", convert_system_message_to_human=True)\n",
|
||||
"\n",
|
||||
"chain = prompt | chat\n",
|
||||
"chain.invoke({})"
|
||||
@@ -233,9 +233,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat = ChatVertexAI(\n",
|
||||
" model_name=\"codechat-bison\", max_output_tokens=1000, temperature=0.5\n",
|
||||
")\n",
|
||||
"chat = ChatVertexAI(model=\"codechat-bison\", max_tokens=1000, temperature=0.5)\n",
|
||||
"\n",
|
||||
"message = chat.invoke(\"Write a Python function generating all prime numbers\")\n",
|
||||
"print(message.content)"
|
||||
@@ -399,7 +397,7 @@
|
||||
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = ChatVertexAI(model_name=\"gemini-pro\", temperature=0)\n",
|
||||
"llm = ChatVertexAI(model=\"gemini-pro\", temperature=0)\n",
|
||||
"llm_with_tools = llm.bind_tools([GetWeather])\n",
|
||||
"ai_msg = llm_with_tools.invoke(\n",
|
||||
" \"what is the weather like in San Francisco\",\n",
|
||||
@@ -551,7 +549,7 @@
|
||||
"human = \"{text}\"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
|
||||
"\n",
|
||||
"chat = ChatVertexAI(model_name=\"chat-bison\", max_output_tokens=1000, temperature=0.5)\n",
|
||||
"chat = ChatVertexAI(model=\"chat-bison\", max_tokens=1000, temperature=0.5)\n",
|
||||
"chain = prompt | chat\n",
|
||||
"\n",
|
||||
"asyncio.run(\n",
|
||||
|
||||
@@ -123,7 +123,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -172,7 +172,7 @@
|
||||
" <td>F</td>\n",
|
||||
" <td>59836 Carla Causeway Suite 939\\nPort Eugene, I...</td>\n",
|
||||
" <td>meltondenise@yahoo.com</td>\n",
|
||||
" <td>1997-09-09</td>\n",
|
||||
" <td>1997-11-23</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>1</th>\n",
|
||||
@@ -181,7 +181,7 @@
|
||||
" <td>M</td>\n",
|
||||
" <td>3108 Christina Forges\\nPort Timothychester, KY...</td>\n",
|
||||
" <td>erica80@hotmail.com</td>\n",
|
||||
" <td>1924-05-05</td>\n",
|
||||
" <td>1924-07-19</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2</th>\n",
|
||||
@@ -190,7 +190,7 @@
|
||||
" <td>F</td>\n",
|
||||
" <td>Unit 7405 Box 3052\\nDPO AE 09858</td>\n",
|
||||
" <td>timothypotts@gmail.com</td>\n",
|
||||
" <td>1933-09-06</td>\n",
|
||||
" <td>1933-11-20</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>3</th>\n",
|
||||
@@ -199,7 +199,7 @@
|
||||
" <td>F</td>\n",
|
||||
" <td>6408 Christopher Hill Apt. 459\\nNew Benjamin, ...</td>\n",
|
||||
" <td>dadams@gmail.com</td>\n",
|
||||
" <td>1988-07-28</td>\n",
|
||||
" <td>1988-10-11</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>4</th>\n",
|
||||
@@ -208,7 +208,7 @@
|
||||
" <td>M</td>\n",
|
||||
" <td>2241 Bell Gardens Suite 723\\nScottside, CA 38463</td>\n",
|
||||
" <td>williamayala@gmail.com</td>\n",
|
||||
" <td>1930-12-19</td>\n",
|
||||
" <td>1931-03-04</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
@@ -233,14 +233,14 @@
|
||||
"\n",
|
||||
" birthdate \n",
|
||||
"id \n",
|
||||
"0 1997-09-09 \n",
|
||||
"1 1924-05-05 \n",
|
||||
"2 1933-09-06 \n",
|
||||
"3 1988-07-28 \n",
|
||||
"4 1930-12-19 "
|
||||
"0 1997-11-23 \n",
|
||||
"1 1924-07-19 \n",
|
||||
"2 1933-11-20 \n",
|
||||
"3 1988-10-11 \n",
|
||||
"4 1931-03-04 "
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -646,7 +646,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.18"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -62,7 +62,7 @@
|
||||
"messages = [system_message, user_message]\n",
|
||||
"\n",
|
||||
"# chat with wasm-chat service\n",
|
||||
"response = chat(messages)\n",
|
||||
"response = chat.invoke(messages)\n",
|
||||
"\n",
|
||||
"print(f\"[Bot] {response.content}\")"
|
||||
]
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install langchain langchain-core langchain-community"
|
||||
"!pip install langchain langchain-core langchain-community httpx"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -89,6 +89,58 @@
|
||||
"print(response) # should answer something like \"1. Max\\n2. Bella\\n3. Charlie\\n4. Rocky\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Stream Generation\n",
|
||||
"\n",
|
||||
"For tasks involving the generation of long text, such as creating an extensive article or translating a large document, it can be advantageous to receive the response in parts, as the text is generated, instead of waiting for the complete text. This makes the application more responsive and efficient, especially when the generated text is extensive. We offer two approaches to meet this need: one synchronous and another asynchronous.\n",
|
||||
"\n",
|
||||
"#### Synchronous:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [HumanMessage(content=\"Suggest 3 names for my dog\")]\n",
|
||||
"\n",
|
||||
"for chunk in llm.stream(messages):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Asynchronous:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def async_invoke_chain(animal: str):\n",
|
||||
" messages = [HumanMessage(content=f\"Suggest 3 names for my {animal}\")]\n",
|
||||
" async for chunk in llm._astream(messages):\n",
|
||||
" print(chunk.message.content, end=\"\", flush=True)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"await async_invoke_chain(\"dog\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -184,7 +236,7 @@
|
||||
"\n",
|
||||
"query = \"Qual o tempo máximo para realização da prova?\"\n",
|
||||
"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"\n",
|
||||
"chain.invoke(\n",
|
||||
" {\"input_documents\": docs, \"query\": query}\n",
|
||||
|
||||
@@ -9,10 +9,16 @@
|
||||
"source": [
|
||||
"# NVIDIA AI Foundation Endpoints\n",
|
||||
"\n",
|
||||
":::{.callout-caution}\n",
|
||||
"\n",
|
||||
"These are the LangChain v0.1 docs. Please refer to the updated [LangChain v0.2 docs](https://python.langchain.com/v0.2/docs/integrations/chat/nvidia_ai_endpoints/) instead.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"The `ChatNVIDIA` class is a LangChain chat model that connects to [NVIDIA AI Foundation Endpoints](https://www.nvidia.com/en-us/ai-data-science/foundation-models/).\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"> [NVIDIA AI Foundation Endpoints](https://www.nvidia.com/en-us/ai-data-science/foundation-models/) give users easy access to NVIDIA hosted API endpoints for NVIDIA AI Foundation Models like Mixtral 8x7B, Llama 2, Stable Diffusion, etc. These models, hosted on the [NVIDIA NGC catalog](https://catalog.ngc.nvidia.com/ai-foundation-models), are optimized, tested, and hosted on the NVIDIA AI platform, making them fast and easy to evaluate, further customize, and seamlessly run at peak performance on any accelerated stack.\n",
|
||||
"> [NVIDIA AI Foundation Endpoints](https://www.nvidia.com/en-us/ai-data-science/foundation-models/) give users easy access to NVIDIA hosted API endpoints for NVIDIA AI Foundation Models like Mixtral 8x7B, Llama 2, Stable Diffusion, etc. These models, hosted on the [NVIDIA API catalog](https://build.nvidia.com/), are optimized, tested, and hosted on the NVIDIA AI platform, making them fast and easy to evaluate, further customize, and seamlessly run at peak performance on any accelerated stack.\n",
|
||||
"> \n",
|
||||
"> With [NVIDIA AI Foundation Endpoints](https://www.nvidia.com/en-us/ai-data-science/foundation-models/), you can get quick results from a fully accelerated stack running on [NVIDIA DGX Cloud](https://www.nvidia.com/en-us/data-center/dgx-cloud/). Once customized, these models can be deployed anywhere with enterprise-grade security, stability, and support using [NVIDIA AI Enterprise](https://www.nvidia.com/en-us/data-center/products/ai-enterprise/).\n",
|
||||
"> \n",
|
||||
@@ -58,13 +64,13 @@
|
||||
"\n",
|
||||
"**To get started:**\n",
|
||||
"\n",
|
||||
"1. Create a free account with the [NVIDIA NGC](https://catalog.ngc.nvidia.com/) service, which hosts AI solution catalogs, containers, models, etc.\n",
|
||||
"1. Create a free account with [NVIDIA](https://build.nvidia.com/), which hosts NVIDIA AI Foundation models\n",
|
||||
"\n",
|
||||
"2. Navigate to `Catalog > AI Foundation Models > (Model with API endpoint)`.\n",
|
||||
"2. Click on your model of choice\n",
|
||||
"\n",
|
||||
"3. Select the `API` option and click `Generate Key`.\n",
|
||||
"3. Under `Input` select the `Python` tab, and click `Get API Key`. Then click `Generate Key`.\n",
|
||||
"\n",
|
||||
"4. Save the generated key as `NVIDIA_API_KEY`. From there, you should have access to the endpoints."
|
||||
"4. Copy and save the generated key as `NVIDIA_API_KEY`. From there, you should have access to the endpoints."
|
||||
]
|
||||
},
|
||||
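Step 4 can look like the following sketch. This is a hypothetical snippet; it simply stores the generated key in the environment the way the `ChatNVIDIA` examples expect:

```python
# Minimal sketch: prompt for the key once and export it for the session.
import getpass
import os

if "NVIDIA_API_KEY" not in os.environ:
    os.environ["NVIDIA_API_KEY"] = getpass.getpass("Enter your NVIDIA API key: ")
```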
{
|
||||
@@ -311,7 +317,7 @@
|
||||
"\n",
|
||||
"Some model types support unique prompting techniques and chat messages. We will review a few important ones below.\n",
|
||||
"\n",
|
||||
"**To find out more about a specific model, please navigate to the API section of an AI Foundation model [as linked here](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/ai-foundation/models/codellama-13b/api).**"
|
||||
"**To find out more about a specific model, please navigate to the API section of an AI Foundation model [as linked here](https://build.nvidia.com/).**"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1149,7 +1155,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.18"
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -15,9 +15,15 @@
|
||||
"source": [
|
||||
"# OllamaFunctions\n",
|
||||
"\n",
|
||||
":::{.callout-caution}\n",
|
||||
"\n",
|
||||
"This was an experimental wrapper that bolted-on tool calling support to models that do not natively support it. The primary Ollama integration now supports tool calling, and should be used instead. See example usage in LangChain v0.2 documentation [here](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/).\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"This notebook shows how to use an experimental wrapper around Ollama that gives it the same API as OpenAI Functions.\n",
|
||||
"\n",
|
||||
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use Mistral.\n",
|
||||
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
@@ -32,12 +38,18 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-28T00:53:25.276543Z",
|
||||
"start_time": "2024-04-28T00:53:24.881202Z"
|
||||
},
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_experimental.llms.ollama_functions import OllamaFunctions\n",
|
||||
"\n",
|
||||
"model = OllamaFunctions(model=\"mistral\")"
|
||||
"model = OllamaFunctions(model=\"llama3\", format=\"json\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -50,11 +62,16 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:17.270931Z",
|
||||
"start_time": "2024-04-26T04:59:17.263347Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = model.bind(\n",
|
||||
" functions=[\n",
|
||||
"model = model.bind_tools(\n",
|
||||
" tools=[\n",
|
||||
" {\n",
|
||||
" \"name\": \"get_current_weather\",\n",
|
||||
" \"description\": \"Get the current weather in a given location\",\n",
|
||||
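A minimal invocation sketch that matches the output shown in the next hunk; it assumes the tool-bound `model` from above and a locally running Ollama server:

```python
# Minimal sketch: ask a question that should trigger the get_current_weather tool.
from langchain_core.messages import HumanMessage

response = model.invoke([HumanMessage(content="What is the weather in Boston?")])
print(response.additional_kwargs.get("function_call"))
```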
@@ -88,12 +105,17 @@
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:26.092428Z",
|
||||
"start_time": "2024-04-26T04:59:17.272627Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}'}})"
|
||||
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\"}'}}, id='run-1791f9fe-95ad-4ca4-bdf7-9f73eab31e6f-0')"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
@@ -111,54 +133,119 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using for extraction\n",
|
||||
"## Structured Output\n",
|
||||
"\n",
|
||||
"One useful thing you can do with function calling here is extracting properties from a given input in a structured format:"
|
||||
"One useful thing you can do with function calling using `with_structured_output()` function is extracting properties from a given input in a structured format:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:26.098828Z",
|
||||
"start_time": "2024-04-26T04:59:26.094021Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Schema for structured response\n",
|
||||
"class Person(BaseModel):\n",
|
||||
" name: str = Field(description=\"The person's name\", required=True)\n",
|
||||
" height: float = Field(description=\"The person's height\", required=True)\n",
|
||||
" hair_color: str = Field(description=\"The person's hair color\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Prompt template\n",
|
||||
"prompt = PromptTemplate.from_template(\n",
|
||||
" \"\"\"Alex is 5 feet tall. \n",
|
||||
"Claudia is 1 feet taller than Alex and jumps higher than him. \n",
|
||||
"Claudia is a brunette and Alex is blonde.\n",
|
||||
"\n",
|
||||
"Human: {question}\n",
|
||||
"AI: \"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Chain\n",
|
||||
"llm = OllamaFunctions(model=\"phi3\", format=\"json\", temperature=0)\n",
|
||||
"structured_llm = llm.with_structured_output(Person)\n",
|
||||
"chain = prompt | structured_llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Extracting data about Alex"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:30.164955Z",
|
||||
"start_time": "2024-04-26T04:59:26.099790Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'name': 'Alex', 'height': 5, 'hair_color': 'blonde'},\n",
|
||||
" {'name': 'Claudia', 'height': 6, 'hair_color': 'brunette'}]"
|
||||
"Person(name='Alex', height=5.0, hair_color='blonde')"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import create_extraction_chain\n",
|
||||
"\n",
|
||||
"# Schema\n",
|
||||
"schema = {\n",
|
||||
" \"properties\": {\n",
|
||||
" \"name\": {\"type\": \"string\"},\n",
|
||||
" \"height\": {\"type\": \"integer\"},\n",
|
||||
" \"hair_color\": {\"type\": \"string\"},\n",
|
||||
" },\n",
|
||||
" \"required\": [\"name\", \"height\"],\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# Input\n",
|
||||
"input = \"\"\"Alex is 5 feet tall. Claudia is 1 feet taller than Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\"\"\"\n",
|
||||
"\n",
|
||||
"# Run chain\n",
|
||||
"llm = OllamaFunctions(model=\"mistral\", temperature=0)\n",
|
||||
"chain = create_extraction_chain(schema, llm)\n",
|
||||
"chain.run(input)"
|
||||
"alex = chain.invoke(\"Describe Alex\")\n",
|
||||
"alex"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Extracting data about Claudia"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-04-26T04:59:31.509846Z",
|
||||
"start_time": "2024-04-26T04:59:30.165662Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Person(name='Claudia', height=6.0, hair_color='brunette')"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"claudia = chain.invoke(\"Describe Claudia\")\n",
|
||||
"claudia"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -172,9 +259,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
"source": [
|
||||
"# ChatOpenAI\n",
|
||||
"\n",
|
||||
|
||||
"This notebook covers how to get started with OpenAI chat models."
|
||||
]
|
||||
},
|
||||
@@ -147,7 +148,7 @@
|
||||
"\n",
|
||||
"### ChatOpenAI.bind_tools()\n",
|
||||
"\n",
|
||||
"With `ChatAnthropic.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an Anthropic tool schemas, which looks like:\n",
|
||||
"With `ChatOpenAI.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an OpenAI tool schemas, which looks like:\n",
|
||||
"```\n",
|
||||
"{\n",
|
||||
" \"name\": \"...\",\n",
|
||||
|
||||
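A minimal end-to-end sketch of the paragraph above; the model name and the weather tool are illustrative placeholders:

```python
# Minimal sketch: bind a Pydantic class as a tool and let the model call it.
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class GetWeather(BaseModel):
    """Get the current weather in a given location."""

    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
llm_with_tools = llm.bind_tools([GetWeather])
ai_msg = llm_with_tools.invoke("What is the weather like in San Francisco?")
print(ai_msg.tool_calls)
```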
docs/docs/integrations/chat/together.ipynb (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2970dd75-8ebf-4b51-8282-9b454b8f356d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Together AI\n",
|
||||
"\n",
|
||||
"[Together AI](https://www.together.ai/) offers an API to query [50+ leading open-source models](https://docs.together.ai/docs/inference-models) in a couple lines of code.\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with Together AI models."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1c47fc36",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1ecdb29d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade langchain-together"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "89883202",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Environment\n",
|
||||
"\n",
|
||||
"To use Together AI, you'll need an API key which you can find here:\n",
|
||||
"https://api.together.ai/settings/api-keys. This can be passed in as an init param\n",
|
||||
"``together_api_key`` or set as environment variable ``TOGETHER_API_KEY``.\n"
|
||||
]
|
||||
},
|
||||
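For example, a minimal sketch of the environment-variable route (the key value is a placeholder):

```python
# Minimal sketch: export the key before constructing ChatTogether / Together.
import getpass
import os

if "TOGETHER_API_KEY" not in os.environ:
    os.environ["TOGETHER_API_KEY"] = getpass.getpass("Enter your Together API key: ")
```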
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8304b4d9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "637bb53f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Querying chat models with Together AI\n",
|
||||
"\n",
|
||||
"from langchain_together import ChatTogether\n",
|
||||
"\n",
|
||||
"# choose from our 50+ models here: https://docs.together.ai/docs/inference-models\n",
|
||||
"chat = ChatTogether(\n",
|
||||
" # together_api_key=\"YOUR_API_KEY\",\n",
|
||||
" model=\"meta-llama/Llama-3-70b-chat-hf\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# stream the response back from the model\n",
|
||||
"for m in chat.stream(\"Tell me fun things to do in NYC\"):\n",
|
||||
" print(m.content, end=\"\", flush=True)\n",
|
||||
"\n",
|
||||
"# if you don't want to do streaming, you can use the invoke method\n",
|
||||
"# chat.invoke(\"Tell me fun things to do in NYC\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e7b7170d-d7c5-4890-9714-a37238343805",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Querying code and language models with Together AI\n",
|
||||
"\n",
|
||||
"from langchain_together import Together\n",
|
||||
"\n",
|
||||
"llm = Together(\n",
|
||||
" model=\"codellama/CodeLlama-70b-Python-hf\",\n",
|
||||
" # together_api_key=\"...\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(llm.invoke(\"def bubble_sort(): \"))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -142,11 +142,70 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"## Tool Calling\n",
|
||||
"ChatTongyi supports tool calling API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'name': 'get_current_weather', 'arguments': '{\"location\": \"San Francisco\"}'}, 'id': '', 'type': 'function'}]}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': 'dae79197-8780-9b7e-8c15-6a83e2a53534', 'token_usage': {'input_tokens': 229, 'output_tokens': 19, 'total_tokens': 248}}, id='run-9e06f837-582b-473b-bb1f-5e99a68ecc10-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco'}, 'id': ''}])"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.chat_models.tongyi import ChatTongyi\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"\n",
|
||||
"tools = [\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"get_current_time\",\n",
|
||||
" \"description\": \"当你想知道现在的时间时非常有用。\",\n",
|
||||
" \"parameters\": {},\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"type\": \"function\",\n",
|
||||
" \"function\": {\n",
|
||||
" \"name\": \"get_current_weather\",\n",
|
||||
" \"description\": \"当你想查询指定城市的天气时非常有用。\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"location\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"城市或县区,比如北京市、杭州市、余杭区等。\",\n",
|
||||
" }\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"location\"],\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" SystemMessage(content=\"You are a helpful assistant.\"),\n",
|
||||
" HumanMessage(content=\"What is the weather like in San Francisco?\"),\n",
|
||||
"]\n",
|
||||
"chatLLM = ChatTongyi()\n",
|
||||
"llm_kwargs = {\"tools\": tools, \"result_format\": \"message\"}\n",
|
||||
"ai_message = chatLLM.bind(**llm_kwargs).invoke(messages)\n",
|
||||
"ai_message"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
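As a follow-up, the parsed call can be read off the message through LangChain's standard `tool_calls` attribute (a short sketch using the `ai_message` returned above):

```python
# Minimal sketch: inspect the structured tool call returned by the model.
for call in ai_message.tool_calls:
    print(call["name"], call["args"])  # e.g. get_current_weather {'location': 'San Francisco'}
```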
@@ -119,7 +119,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"response = chat(messages)\n",
|
||||
"response = chat.invoke(messages)\n",
|
||||
"print(response.content) # Displays the AI-generated poem"
|
||||
]
|
||||
},
|
||||
|
||||
@@ -216,11 +216,11 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_loaders.base import ChatSession\n",
|
||||
"from langchain_community.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"from langchain_core.chat_sessions import ChatSession\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
|
||||
@@ -116,11 +116,11 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_loaders.base import ChatSession\n",
|
||||
"from langchain_community.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"from langchain_core.chat_sessions import ChatSession\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
|
||||
@@ -87,11 +87,11 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_loaders.base import ChatSession\n",
|
||||
"from langchain_community.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"from langchain_core.chat_sessions import ChatSession\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
|
||||
@@ -136,11 +136,11 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_loaders.base import ChatSession\n",
|
||||
"from langchain_community.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"from langchain_core.chat_sessions import ChatSession\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
|
||||
@@ -209,11 +209,11 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_loaders.base import ChatSession\n",
|
||||
"from langchain_community.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"from langchain_core.chat_sessions import ChatSession\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
|
||||
@@ -126,11 +126,11 @@
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_loaders.base import ChatSession\n",
|
||||
"from langchain_community.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"from langchain_core.chat_sessions import ChatSession\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
|
||||
docs/docs/integrations/document_loaders/browserbase.ipynb (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Browserbase\n",
|
||||
"\n",
|
||||
"[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers, it offers advanced debugging, session recordings, stealth mode, integrated proxies and captcha solving.\n",
|
||||
"\n",
|
||||
"## Installation\n",
|
||||
"\n",
|
||||
"- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`).\n",
|
||||
"- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"% pip install browserbase"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can load webpages into LangChain using `BrowserbaseLoader`. Optionally, you can set `text_content` parameter to convert the pages to text-only representation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import BrowserbaseLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = BrowserbaseLoader(\n",
|
||||
" urls=[\n",
|
||||
" \"https://example.com\",\n",
|
||||
" ],\n",
|
||||
" # Text mode\n",
|
||||
" text_content=False,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"docs = loader.load()\n",
|
||||
"print(docs[0].page_content[:61])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading images\n",
|
||||
"\n",
|
||||
"You can also load screenshots of webpages (as bytes) for multi-modal models.\n",
|
||||
"\n",
|
||||
"Full example using GPT-4V:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from browserbase import Browserbase\n",
|
||||
"from browserbase.helpers.gpt4 import GPT4VImage, GPT4VImageDetail\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"chat = ChatOpenAI(model=\"gpt-4-vision-preview\", max_tokens=256)\n",
|
||||
"browser = Browserbase()\n",
|
||||
"\n",
|
||||
"screenshot = browser.screenshot(\"https://browserbase.com\")\n",
|
||||
"\n",
|
||||
"result = chat.invoke(\n",
|
||||
" [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=[\n",
|
||||
" {\"type\": \"text\", \"text\": \"What color is the logo?\"},\n",
|
||||
" GPT4VImage(screenshot, GPT4VImageDetail.auto),\n",
|
||||
" ]\n",
|
||||
" )\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(result.content)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -630,7 +630,7 @@
|
||||
],
|
||||
"source": [
|
||||
"# Query retriever, should return parents (using MMR since that was set as search_type above)\n",
|
||||
"retrieved_parent_docs = retriever.get_relevant_documents(\n",
|
||||
"retrieved_parent_docs = retriever.invoke(\n",
|
||||
" \"what signs does Birch Street allow on their property?\"\n",
|
||||
")\n",
|
||||
"for chunk in retrieved_parent_docs:\n",
|
||||
|
||||
@@ -97,7 +97,7 @@
|
||||
" # delete the gpt-4 model_name to use the default gpt-3.5 turbo for faster results\n",
|
||||
" gpt_4 = ChatOpenAI(temperature=0.02, model_name=\"gpt-4\")\n",
|
||||
" # Use the retriever's 'get_relevant_documents' method if needed to filter down longer docs\n",
|
||||
" relevant_nodes = figma_doc_retriever.get_relevant_documents(human_input)\n",
|
||||
" relevant_nodes = figma_doc_retriever.invoke(human_input)\n",
|
||||
" conversation = [system_message_prompt, human_message_prompt]\n",
|
||||
" chat_prompt = ChatPromptTemplate.from_messages(conversation)\n",
|
||||
" response = gpt_4(\n",
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet google-cloud-bigquery"
|
||||
"%pip install --upgrade --quiet langchain-google-community[bigquery]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -31,7 +31,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import BigQueryLoader"
|
||||
"from langchain_google_community import BigQueryLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
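After the import change, usage stays the same; a minimal sketch follows (the dataset and table names are placeholders):

```python
# Minimal sketch: load query results as Documents with the new package.
from langchain_google_community import BigQueryLoader

loader = BigQueryLoader(query="SELECT title, body FROM `my_dataset.my_table` LIMIT 10")
docs = loader.load()
print(docs[0].page_content)
```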
@@ -21,7 +21,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet google-cloud-storage"
|
||||
"%pip install --upgrade --quiet langchain-google-community[gcs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -31,7 +31,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import GCSDirectoryLoader"
|
||||
"from langchain_google_community import GCSDirectoryLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet google-cloud-storage"
|
||||
"%pip install --upgrade --quiet langchain-google-community[gcs]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -31,7 +31,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import GCSFileLoader"
|
||||
"from langchain_google_community import GCSFileLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -38,7 +38,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet google-api-python-client google-auth-httplib2 google-auth-oauthlib"
|
||||
"%pip install --upgrade --quiet langchain-google-community[drive]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -50,7 +50,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import GoogleDriveLoader"
|
||||
"from langchain_google_community import GoogleDriveLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -121,10 +121,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import (\n",
|
||||
" GoogleDriveLoader,\n",
|
||||
" UnstructuredFileIOLoader,\n",
|
||||
")"
|
||||
"from langchain_community.document_loaders import UnstructuredFileIOLoader\n",
|
||||
"from langchain_google_community import GoogleDriveLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -219,7 +217,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Extended usage\n",
|
||||
"An external component can manage the complexity of Google Drive : `langchain-googledrive`\n",
|
||||
"An external (unofficial) component can manage the complexity of Google Drive : `langchain-googledrive`\n",
|
||||
"It's compatible with the ̀`langchain_community.document_loaders.GoogleDriveLoader` and can be used\n",
|
||||
"in its place.\n",
|
||||
"\n",
|
||||
@@ -339,7 +337,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import GoogleDriveLoader\n",
|
||||
"from langchain_google_community import GoogleDriveLoader\n",
|
||||
"\n",
|
||||
"loader = GoogleDriveLoader(\n",
|
||||
" folder_id=folder_id,\n",
|
||||
@@ -368,6 +366,54 @@
|
||||
"doc[0].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ae0a525",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Loading extended metadata\n",
|
||||
"Following extra fields can also be fetched within metadata of each Document:\n",
|
||||
" - full_path - Full path of the file/s in google drive.\n",
|
||||
" - owner - owner of the file/s.\n",
|
||||
" - size - size of the file/s."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6c0db38c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_community import GoogleDriveLoader\n",
|
||||
"\n",
|
||||
"loader = GoogleDriveLoader(\n",
|
||||
" folder_id=folder_id,\n",
|
||||
" load_extended_matadata=True,\n",
|
||||
" # Optional: configure whether to load extended metadata for each Document.\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"doc = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "826d88a7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can pass load_extended_matadata=True, to add Google Drive document extended details to metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fdaf04e4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc[0].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cd13d7d1-db7a-498d-ac98-76ccd9ad9019",
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet google-cloud-speech"
|
||||
"%pip install --upgrade --quiet langchain-google-community[speech]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -52,7 +52,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import GoogleSpeechToTextLoader\n",
|
||||
"from langchain_google_community import GoogleSpeechToTextLoader\n",
|
||||
"\n",
|
||||
"project_id = \"<PROJECT_ID>\"\n",
|
||||
"file_path = \"gs://cloud-samples-data/speech/audio.flac\"\n",
|
||||
@@ -152,7 +152,7 @@
|
||||
" RecognitionConfig,\n",
|
||||
" RecognitionFeatures,\n",
|
||||
")\n",
|
||||
"from langchain_community.document_loaders import GoogleSpeechToTextLoader\n",
|
||||
"from langchain_google_community import GoogleSpeechToTextLoader\n",
|
||||
"\n",
|
||||
"project_id = \"<PROJECT_ID>\"\n",
|
||||
"location = \"global\"\n",
|
||||
|
||||
docs/docs/integrations/document_loaders/kinetica.ipynb (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Kinetica\n",
|
||||
"\n",
|
||||
"This notebooks goes over how to load documents from Kinetica"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install gpudb==7.2.0.1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.kinetica_loader import KineticaLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"## Loading Environment Variables\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from langchain_community.vectorstores import (\n",
|
||||
" KineticaSettings,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Kinetica needs the connection to the database.\n",
|
||||
"# This is how to set it up.\n",
|
||||
"HOST = os.getenv(\"KINETICA_HOST\", \"http://127.0.0.1:9191\")\n",
|
||||
"USERNAME = os.getenv(\"KINETICA_USERNAME\", \"\")\n",
|
||||
"PASSWORD = os.getenv(\"KINETICA_PASSWORD\", \"\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def create_config() -> KineticaSettings:\n",
|
||||
" return KineticaSettings(host=HOST, username=USERNAME, password=PASSWORD)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.kinetica_loader import KineticaLoader\n",
|
||||
"\n",
|
||||
"# The following `QUERY` is an example which will not run; this\n",
|
||||
"# needs to be substituted with a valid `QUERY` that will return\n",
|
||||
"# data and the `SCHEMA.TABLE` combination must exist in Kinetica.\n",
|
||||
"\n",
|
||||
"QUERY = \"select text, survey_id from SCHEMA.TABLE limit 10\"\n",
|
||||
"kinetica_loader = KineticaLoader(\n",
|
||||
" QUERY,\n",
|
||||
" HOST,\n",
|
||||
" USERNAME,\n",
|
||||
" PASSWORD,\n",
|
||||
")\n",
|
||||
"kinetica_documents = kinetica_loader.load()\n",
|
||||
"print(kinetica_documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.kinetica_loader import KineticaLoader\n",
|
||||
"\n",
|
||||
"# The following `QUERY` is an example which will not run; this\n",
|
||||
"# needs to be substituted with a valid `QUERY` that will return\n",
|
||||
"# data and the `SCHEMA.TABLE` combination must exist in Kinetica.\n",
|
||||
"\n",
|
||||
"QUERY = \"select text, survey_id as source from SCHEMA.TABLE limit 10\"\n",
|
||||
"snowflake_loader = KineticaLoader(\n",
|
||||
" query=QUERY,\n",
|
||||
" host=HOST,\n",
|
||||
" username=USERNAME,\n",
|
||||
" password=PASSWORD,\n",
|
||||
" metadata_columns=[\"source\"],\n",
|
||||
")\n",
|
||||
"kinetica_documents = snowflake_loader.load()\n",
|
||||
"print(kinetica_documents)"
|
||||
]
|
||||
}
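As a short follow-up sketch of what `metadata_columns` implies: a column aliased in the query and listed in `metadata_columns` should surface in each Document's metadata rather than in its page content. The `source` key below comes from the `survey_id as source` alias in the cell above; everything else uses only names the notebook already defines.

```python
# Sketch: confirm the aliased `source` column landed in metadata,
# not in page_content (assumes kinetica_documents from the cell above).
for d in kinetica_documents:
    print(d.metadata.get("source"), "|", d.page_content[:40])
```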
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -30,13 +30,24 @@
"source": [
"from getpass import getpass\n",
"\n",
"from langchain_community.document_loaders.larksuite import LarkSuiteDocLoader\n",
"from langchain_community.document_loaders.larksuite import (\n",
" LarkSuiteDocLoader,\n",
" LarkSuiteWikiLoader,\n",
")\n",
"\n",
"DOMAIN = input(\"larksuite domain\")\n",
"ACCESS_TOKEN = getpass(\"larksuite tenant_access_token or user_access_token\")\n",
"DOCUMENT_ID = input(\"larksuite document id\")"
]
},
{
"cell_type": "markdown",
"id": "4b6b9a66",
"metadata": {},
"source": [
"## Load From Document"
]
},
{
"cell_type": "code",
"execution_count": 3,
@@ -65,6 +76,38 @@
"pprint(docs)"
]
},
{
"cell_type": "markdown",
"id": "86f4a714",
"metadata": {},
"source": [
"## Load From Wiki"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "7332dfb9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[Document(page_content='Test doc\\nThis is a test wiki doc.\\n', metadata={'document_id': 'TxOKdtMWaoSTDLxYS4ZcdEI7nwc', 'revision_id': 15, 'title': 'Test doc'})]\n"
]
}
],
"source": [
"from pprint import pprint\n",
"\n",
"DOCUMENT_ID = input(\"larksuite wiki id\")\n",
"larksuite_loader = LarkSuiteWikiLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID)\n",
"docs = larksuite_loader.load()\n",
"\n",
"pprint(docs)"
]
},
{
"cell_type": "code",
"execution_count": null,
130 docs/docs/integrations/document_loaders/mintbase.ipynb Normal file
@@ -0,0 +1,130 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "vm8vn9t8DvC_"
},
"source": [
"# Near Blockchain"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5WjXERXzFEhg"
},
"source": [
"## Overview"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "juAmbgoWD17u"
},
"source": [
"The intention of this notebook is to provide a means of testing functionality in the Langchain Document Loader for Near Blockchain.\n",
"\n",
"Initially this Loader supports:\n",
"\n",
"* Loading NFTs as Documents from NFT Smart Contracts (NEP-171 and NEP-177)\n",
"* Near Mainnet, Near Testnet (default is mainnet)\n",
"* Mintbase's Graph API\n",
"\n",
"It can be extended if the community finds value in this loader. Specifically:\n",
"\n",
"* Additional APIs can be added (e.g. Transaction-related APIs)\n",
"\n",
"This Document Loader Requires:\n",
"\n",
"* A free [Mintbase API Key](https://docs.mintbase.xyz/dev/mintbase-graph/)\n",
"\n",
"The output takes the following format:\n",
"\n",
"- pageContent= Individual NFT\n",
"- metadata={'source': 'nft.yearofchef.near', 'blockchain': 'mainnet', 'tokenId': '1846'}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load NFTs into Document Loader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get MINTBASE_API_KEY from https://docs.mintbase.xyz/dev/mintbase-graph/\n",
"\n",
"mintbaseApiKey = \"...\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Option 1: Near Mainnet (default BlockchainType)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "J3LWHARC-Kn0"
},
"outputs": [],
"source": [
"from langchain_community.document_loaders.mintbase import MintbaseDocumentLoader\n",
"\n",
"contractAddress = \"nft.yearofchef.near\"  # Year of chef contract address\n",
"\n",
"\n",
"blockchainLoader = MintbaseDocumentLoader(\n",
" contract_address=contractAddress, blockchain_type=\"mainnet\", api_key=mintbaseApiKey\n",
")\n",
"\n",
"nfts = blockchainLoader.load()\n",
"\n",
"print(nfts[:1])\n",
"\n",
"for doc in blockchainLoader.lazy_load():\n",
" print()\n",
" print(type(doc))\n",
" print(doc)"
]
}
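Since the overview lists Near Testnet as supported, a minimal sketch of the testnet variant follows. The `"testnet"` value for `blockchain_type` and the placeholder contract address are assumptions inferred from the overview above, not verified values.

```python
# Sketch: load NFTs from Near Testnet instead of mainnet.
# "testnet" as a blockchain_type value and the contract address below are
# assumptions based on the overview, not confirmed values.
testnet_loader = MintbaseDocumentLoader(
    contract_address="nft.example.testnet",  # hypothetical testnet contract
    blockchain_type="testnet",
    api_key=mintbaseApiKey,
)
for doc in testnet_loader.lazy_load():
    print(doc.metadata)
```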
],
"metadata": {
"colab": {
"collapsed_sections": [
"5WjXERXzFEhg"
],
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
236 docs/docs/integrations/document_loaders/oracleai.ipynb Normal file
@@ -0,0 +1,236 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Oracle AI Vector Search: Document Processing\n",
"Oracle AI Vector Search is designed for Artificial Intelligence (AI) workloads and allows you to query data based on semantics rather than keywords. One of its biggest benefits is that semantic search on unstructured data can be combined with relational search on business data in a single system. This is not only powerful but also significantly more effective, because you don't need to add a specialized vector database, eliminating the pain of data fragmentation between multiple systems.\n",
"\n",
"This guide demonstrates how to use the document processing capabilities within Oracle AI Vector Search to load and chunk documents using OracleDocLoader and OracleTextSplitter, respectively."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"\n",
"Please install the Oracle Python client driver to use Langchain with Oracle AI Vector Search."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# pip install oracledb"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Connect to Oracle Database\n",
"The following sample code shows how to connect to Oracle Database."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"\n",
"import oracledb\n",
"\n",
"# please update with your username, password, hostname and service_name\n",
"username = \"<username>\"\n",
"password = \"<password>\"\n",
"dsn = \"<hostname>/<service_name>\"\n",
"\n",
"try:\n",
" conn = oracledb.connect(user=username, password=password, dsn=dsn)\n",
" print(\"Connection successful!\")\n",
"except Exception as e:\n",
" print(\"Connection failed!\", e)\n",
" sys.exit(1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now let's create a table and insert some sample docs to test."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"try:\n",
" cursor = conn.cursor()\n",
"\n",
" drop_table_sql = \"\"\"drop table if exists demo_tab\"\"\"\n",
" cursor.execute(drop_table_sql)\n",
"\n",
" create_table_sql = \"\"\"create table demo_tab (id number, data clob)\"\"\"\n",
" cursor.execute(create_table_sql)\n",
"\n",
" insert_row_sql = \"\"\"insert into demo_tab values (:1, :2)\"\"\"\n",
" rows_to_insert = [\n",
" (\n",
" 1,\n",
" \"If the answer to any preceding questions is yes, then the database stops the search and allocates space from the specified tablespace; otherwise, space is allocated from the database default shared temporary tablespace.\",\n",
" ),\n",
" (\n",
" 2,\n",
" \"A tablespace can be online (accessible) or offline (not accessible) whenever the database is open.\\nA tablespace is usually online so that its data is available to users. The SYSTEM tablespace and temporary tablespaces cannot be taken offline.\",\n",
" ),\n",
" (\n",
" 3,\n",
" \"The database stores LOBs differently from other data types. Creating a LOB column implicitly creates a LOB segment and a LOB index. The tablespace containing the LOB segment and LOB index, which are always stored together, may be different from the tablespace containing the table.\\nSometimes the database can store small amounts of LOB data in the table itself rather than in a separate LOB segment.\",\n",
" ),\n",
" ]\n",
" cursor.executemany(insert_row_sql, rows_to_insert)\n",
"\n",
" conn.commit()\n",
"\n",
" print(\"Table created and populated.\")\n",
" cursor.close()\n",
"except Exception as e:\n",
" print(\"Table creation failed.\", e)\n",
" cursor.close()\n",
" conn.close()\n",
" sys.exit(1)"
]
},
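Before moving on, you may want to confirm the sample rows actually landed. A small sketch that reads them back with plain oracledb cursor calls; the only assumption is that `conn` is still the open connection from the cells above.

```python
# Sketch: read back the sample rows to verify the insert above worked.
cursor = conn.cursor()
cursor.execute("select id, data from demo_tab order by id")
for row_id, data in cursor:
    # `data` is a CLOB locator; read() materializes it as a string.
    print(row_id, data.read()[:60], "...")
cursor.close()
```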
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Documents\n",
"Users can load documents from Oracle Database, from a file system, or from both; they just need to set the loader parameters accordingly. Please refer to the Oracle AI Vector Search guide for complete information about these parameters.\n",
"\n",
"The main benefit of using OracleDocLoader is that it can handle 150+ different file formats, so you don't need a different type of loader for each format. Here is the list of the formats that we support: [Oracle Text Supported Document Formats](https://docs.oracle.com/en/database/oracle/oracle-database/23/ccref/oracle-text-supported-document-formats.html)\n",
"\n",
"The following sample code shows how to do that:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders.oracleai import OracleDocLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"\"\"\"\n",
"# loading a local file\n",
"loader_params = {}\n",
"loader_params[\"file\"] = \"<file>\"\n",
"\n",
"# loading from a local directory\n",
"loader_params = {}\n",
"loader_params[\"dir\"] = \"<directory>\"\n",
"\"\"\"\n",
"\n",
"# loading from Oracle Database table\n",
"loader_params = {\n",
" \"owner\": \"<owner>\",\n",
" \"tablename\": \"demo_tab\",\n",
" \"colname\": \"data\",\n",
"}\n",
"\n",
"\"\"\" load the docs \"\"\"\n",
"loader = OracleDocLoader(conn=conn, params=loader_params)\n",
"docs = loader.load()\n",
"\n",
"\"\"\" verify \"\"\"\n",
"print(f\"Number of docs loaded: {len(docs)}\")\n",
"# print(f\"Document-0: {docs[0].page_content}\") # content"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Split Documents\n",
"Documents can vary in size from small to very large, and users typically split/chunk them into smaller pieces before generating embeddings. Many splitting customizations are available; please refer to the Oracle AI Vector Search guide for complete information about these parameters.\n",
"\n",
"The following sample code shows how to do that:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders.oracleai import OracleTextSplitter\n",
"from langchain_core.documents import Document\n",
"\n",
"\"\"\"\n",
"# Some examples\n",
"# split by chars, max 500 chars\n",
"splitter_params = {\"split\": \"chars\", \"max\": 500, \"normalize\": \"all\"}\n",
"\n",
"# split by words, max 100 words\n",
"splitter_params = {\"split\": \"words\", \"max\": 100, \"normalize\": \"all\"}\n",
"\n",
"# split by sentence, max 20 sentences\n",
"splitter_params = {\"split\": \"sentence\", \"max\": 20, \"normalize\": \"all\"}\n",
"\"\"\"\n",
"\n",
"# split by default parameters\n",
"splitter_params = {\"normalize\": \"all\"}\n",
"\n",
"# get the splitter instance\n",
"splitter = OracleTextSplitter(conn=conn, params=splitter_params)\n",
"\n",
"list_chunks = []\n",
"for doc in docs:\n",
" chunks = splitter.split_text(doc.page_content)\n",
" list_chunks.extend(chunks)\n",
"\n",
"\"\"\" verify \"\"\"\n",
"print(f\"Number of Chunks: {len(list_chunks)}\")\n",
"# print(f\"Chunk-0: {list_chunks[0]}\") # content"
]
},
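If you want to carry the chunks forward as LangChain Documents (for example, to feed an embedder), here is a minimal sketch under the assumption that each chunk returned by `split_text` is plain text; the `doc_index` metadata key is just an illustrative name.

```python
# Sketch: wrap each text chunk in a Document, tagging which source doc it came from.
from langchain_core.documents import Document

chunk_docs = []
for doc_idx, doc in enumerate(docs):
    for chunk in splitter.split_text(doc.page_content):
        chunk_docs.append(
            Document(page_content=chunk, metadata={"doc_index": doc_idx})
        )
print(f"Wrapped {len(chunk_docs)} chunks as Documents")
```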
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### End to End Demo\n",
"Please refer to our complete demo guide [Oracle AI Vector Search End-to-End Demo Guide](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) to build an end-to-end RAG pipeline with the help of Oracle AI Vector Search.\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -6,17 +6,19 @@
"source": [
"# Pebblo Safe DocumentLoader\n",
"\n",
"> [Pebblo](https://github.com/daxa-ai/pebblo) enables developers to safely load data and promote their Gen AI app to deployment without worrying about the organization’s compliance and security requirements. The project identifies semantic topics and entities found in the loaded data and summarizes them on the UI or a PDF report.\n",
"> [Pebblo](https://daxa-ai.github.io/pebblo/) enables developers to safely load data and promote their Gen AI app to deployment without worrying about the organization’s compliance and security requirements. The project identifies semantic topics and entities found in the loaded data and summarizes them on the UI or a PDF report.\n",
"\n",
"Pebblo has two components.\n",
"\n",
"1. Pebblo Safe DocumentLoader for Langchain\n",
"1. Pebblo Daemon\n",
"1. Pebblo Server\n",
"\n",
"This document describes how to augment your existing Langchain DocumentLoader with Pebblo Safe DocumentLoader to get deep data visibility on the types of Topics and Entities ingested into the Gen-AI Langchain application. For details on `Pebblo Daemon` see this [pebblo daemon](https://daxa-ai.github.io/pebblo-docs/daemon.html) document.\n",
"This document describes how to augment your existing Langchain DocumentLoader with Pebblo Safe DocumentLoader to get deep data visibility on the types of Topics and Entities ingested into the Gen-AI Langchain application. For details on `Pebblo Server` see this [pebblo server](https://daxa-ai.github.io/pebblo/daemon) document.\n",
"\n",
"Pebblo Safeloader enables safe data ingestion for Langchain `DocumentLoader`. This is done by wrapping the document loader call with `Pebblo Safe DocumentLoader`.\n",
"\n",
"Note: To configure the Pebblo server on some URL other than Pebblo's default (localhost:8000), put the correct URL in the `PEBBLO_CLASSIFIER_URL` env variable. This is configurable using the `classifier_url` keyword argument as well. Ref: [server-configurations](https://daxa-ai.github.io/pebblo/config)\n",
"\n",
"#### How to Pebblo-enable Document Loading?\n",
"\n",
"Assume a Langchain RAG application snippet using `CSVLoader` to read a CSV document for inference.\n",
@@ -69,7 +71,7 @@
"source": [
"### Send semantic topics and identities to Pebblo cloud server\n",
"\n",
"To send semantic data to pebblo-cloud, pass api-key to PebbloSafeLoader as an argument or alternatively, put the api-ket in `PEBBLO_API_KEY` environment variable."
"To send semantic data to pebblo-cloud, pass api-key to PebbloSafeLoader as an argument or alternatively, put the api-key in `PEBBLO_API_KEY` environment variable."
]
},
{
@@ -91,6 +93,41 @@
"documents = loader.load()\n",
"print(documents)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Add semantic topics and identities to loaded metadata\n",
"\n",
"To add semantic topics and semantic entities to the metadata of loaded documents, set `load_semantic` to True as an argument, or alternatively define the environment variable `PEBBLO_LOAD_SEMANTIC` and set it to True."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"from langchain_community.document_loaders import PebbloSafeLoader\n",
"\n",
"loader = PebbloSafeLoader(\n",
" CSVLoader(\"data/corp_sens_data.csv\"),\n",
" name=\"acme-corp-rag-1\", # App name (Mandatory)\n",
" owner=\"Joe Smith\", # Owner (Optional)\n",
" description=\"Support productivity RAG application\", # Description (Optional)\n",
" api_key=\"my-api-key\", # API key (Optional, can be set in the environment variable PEBBLO_API_KEY)\n",
" load_semantic=True, # Load semantic data (Optional, default is False, can be set in the environment variable PEBBLO_LOAD_SEMANTIC)\n",
")\n",
"documents = loader.load()\n",
"print(documents[0].metadata)"
]
},
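To see what `load_semantic=True` actually adds, here is a sketch that surfaces only the semantic-looking metadata. The key names `pebblo_semantic_topics` and `pebblo_semantic_entities` are an assumption about Pebblo's naming, not confirmed by this document; print the full metadata dict if they do not appear in your output.

```python
# Sketch: surface the semantic metadata added by load_semantic=True.
# The two key names below are assumed, not confirmed; fall back to
# printing d.metadata wholesale if they are absent.
for d in documents[:3]:
    topics = d.metadata.get("pebblo_semantic_topics")
    entities = d.metadata.get("pebblo_semantic_entities")
    print(topics, entities)
```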
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
95 docs/docs/integrations/document_loaders/spider.ipynb Normal file
@@ -0,0 +1,95 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Spider\n",
"[Spider](https://spider.cloud/) is the [fastest](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md) and most affordable crawler and scraper that returns LLM-ready data.\n",
"\n",
"## Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install spider-client"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage\n",
"To use Spider you need an API key from [spider.cloud](https://spider.cloud/)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[Document(page_content='Spider - Fastest Web Crawler built for AI Agents and Large Language Models[Spider v1 Logo Spider ](/)The World\\'s Fastest and Cheapest Crawler API==========View Demo* Basic* StreamingExample requestPythonCopy```import requests, osheaders = { \\'Authorization\\': os.environ[\"SPIDER_API_KEY\"], \\'Content-Type\\': \\'application/json\\',}json_data = {\"limit\":50,\"url\":\"http://www.example.com\"}response = requests.post(\\'https://api.spider.cloud/crawl\\', headers=headers, json=json_data)print(response.json())```Example ResponseScrape with no headaches----------* Proxy rotations* Agent headers* Avoid anti-bot detections* Headless chrome* Markdown LLM ResponsesThe Fastest Web Crawler----------* Powered by [spider-rs](https://github.com/spider-rs/spider)* Do 20,000 pages in seconds* Full concurrency* Powerful and simple API* Cost effectiveScrape Anything with AI----------* Custom scripting browser* Custom data extraction* Data pipelines* Detailed insights* Advanced labeling[API](/docs/api) [Price](/credits/new) [Guides](/guides) [About](/about) [Docs](https://docs.rs/spider/latest/spider/) [Privacy](/privacy) [Terms](/eula)© 2024 Spider from A11yWatchTheme Light Dark Toggle Theme [GitHubGithub](https://github.com/spider-rs/spider)', metadata={'description': 'Collect data rapidly from any website. Seamlessly scrape websites and get data tailored for LLM workloads.', 'domain': 'spider.cloud', 'extracted_data': None, 'file_size': 33743, 'keywords': None, 'pathname': '/', 'resource_type': 'html', 'title': 'Spider - Fastest Web Crawler built for AI Agents and Large Language Models', 'url': '48f1bc3c-3fbb-408a-865b-c191a1bb1f48/spider.cloud/index.html', 'user_id': '48f1bc3c-3fbb-408a-865b-c191a1bb1f48'})]\n"
]
}
],
"source": [
"from langchain_community.document_loaders import SpiderLoader\n",
"\n",
"loader = SpiderLoader(\n",
" api_key=\"YOUR_API_KEY\",\n",
" url=\"https://spider.cloud\",\n",
" mode=\"scrape\", # if no API key is provided it looks for SPIDER_API_KEY in env\n",
")\n",
"\n",
"data = loader.load()\n",
"print(data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Modes\n",
"- `scrape`: Default mode that scrapes a single URL\n",
"- `crawl`: Crawl all subpages of the domain URL provided"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Crawler options\n",
"The `params` parameter is a dictionary that can be passed to the loader. See the [Spider documentation](https://spider.cloud/docs/api) for all available parameters."
]
}
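A minimal sketch combining `crawl` mode with `params`: the `limit` key mirrors the key in Spider's own example request shown in the output above, while any other params should be checked against the Spider docs before use. The `url` metadata key is the one visible in the scrape output earlier in this notebook.

```python
# Sketch: crawl subpages of a domain instead of scraping one URL.
# `limit` mirrors the key in Spider's example request; treat any other
# params as assumptions to verify against https://spider.cloud/docs/api.
from langchain_community.document_loaders import SpiderLoader

crawl_loader = SpiderLoader(
    url="https://spider.cloud",
    mode="crawl",
    params={"limit": 5},
)
for doc in crawl_loader.load():
    print(doc.metadata.get("url"))
```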
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@@ -99,7 +99,7 @@
],
"source": [
"# Test the retriever\n",
"spreedly_doc_retriever.get_relevant_documents(\"CRC\")"
"spreedly_doc_retriever.invoke(\"CRC\")"
]
},
{
126 docs/docs/integrations/document_loaders/upstage.ipynb Normal file
@@ -0,0 +1,126 @@
{
"cells": [
{
"cell_type": "raw",
"id": "910f5772b6af13c9",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"source": [
"---\n",
"sidebar_label: Upstage\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "433f5422ad8e1efa",
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"source": [
"# UpstageLayoutAnalysisLoader\n",
"\n",
"This notebook covers how to get started with `UpstageLayoutAnalysisLoader`.\n",
"\n",
"## Installation\n",
"\n",
"Install the `langchain-upstage` package.\n",
"\n",
"```bash\n",
"pip install -U langchain-upstage\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "e6e5941c",
"metadata": {},
"source": [
"## Environment Setup\n",
"\n",
"Make sure to set the following environment variables:\n",
"\n",
"- `UPSTAGE_API_KEY`: Your Upstage API key. Read the [Upstage developers document](https://developers.upstage.ai/docs/getting-started/quick-start) to get your API key.\n",
"\n",
"> The previously used UPSTAGE_DOCUMENT_AI_API_KEY is deprecated; the same key can now be used as UPSTAGE_API_KEY."
]
},
{
"cell_type": "markdown",
"id": "21e72f3d",
"metadata": {},
"source": [
"## Usage"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a05efd34",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"UPSTAGE_API_KEY\"] = \"YOUR_API_KEY\""
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "2b914a7b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective\\nDepth Up-Scaling Dahyun Kim* , Chanjun Park*1, Sanghoon Kim*+, Wonsung Lee*†, Wonho Song*\\nYunsu Kim* , Hyeonwoo Kim* , Yungi Kim, Hyeonju Lee, Jihoo Kim\\nChangbae Ahn, Seonghoon Yang, Sukyung Lee, Hyunbyung Park, Gyoungjin Gim\\nMikyoung Cha, Hwalsuk Leet , Sunghun Kim+ Upstage AI, South Korea {kdahyun, chan jun · park, limerobot, wonsung · lee, hwalsuk lee, hunkim} @ upstage · ai Abstract We introduce SOLAR 10.7B, a large language\\nmodel (LLM) with 10.7 billion parameters,\\ndemonstrating superior performance in various\\nnatural language processing (NLP) tasks. In-\\nspired by recent efforts to efficiently up-scale\\nLLMs, we present a method for scaling LLMs\\ncalled depth up-scaling (DUS), which encom-\\npasses depthwise scaling and continued pre-\\ntraining. In contrast to other LLM up-scaling\\nmethods that use mixture-of-experts, DUS does\\nnot require complex changes to train and infer-\\nence efficiently. We show experimentally that\\nDUS is simple yet effective in scaling up high-\\nperformance LLMs from small ones. Building\\non the DUS model, we additionally present SO-\\nLAR 10.7B-Instruct, a variant fine-tuned for\\ninstruction-following capabilities, surpassing\\nMixtral-8x7B-Instruct. SOLAR 10.7B is pub-\\nlicly available under the Apache 2.0 license,\\npromoting broad access and application in the\\nLLM field 1 1 Introduction The field of natural language processing (NLP)\\nhas been significantly transformed by the introduc-\\ntion of large language models (LLMs), which have\\nenhanced our understanding and interaction with\\nhuman language (Zhao et al., 2023). These ad-\\nvancements bring challenges such as the increased\\nneed to train ever larger models (Rae et al., 2021;\\nWang et al., 2023; Pan et al., 2023; Lian, 2023;\\nYao et al., 2023; Gesmundo and Maile, 2023) OW-\\ning to the performance scaling law (Kaplan et al.,\\n2020; Hernandez et al., 2021; Anil et al., 2023;\\nKaddour et al., 2023). To efficiently tackle the\\nabove, recent works in scaling language models\\nsuch as a mixture of experts (MoE) (Shazeer et al.,\\n2017; Komatsuzaki et al., 2022) have been pro-\\nposed. While those approaches are able to effi- ciently and effectively scale-up LLMs, they often\\nrequire non-trivial changes to the training and infer-\\nence framework (Gale et al., 2023), which hinders\\nwidespread applicability. Effectively and efficiently\\nscaling up LLMs whilst also retaining the simplic-\\nity for ease of use is an important problem (Alberts\\net al., 2023; Fraiwan and Khasawneh, 2023; Sallam\\net al., 2023; Bahrini et al., 2023). Inspired by Komatsuzaki et al. (2022), we\\npresent depth up-scaling (DUS), an effective and\\nefficient method to up-scale LLMs whilst also re-\\nmaining straightforward to use. DUS consists of\\nscaling the number of layers in the base model and\\ncontinually pretraining the scaled model. Unlike\\n(Komatsuzaki et al., 2022), DUS does not scale\\nthe model using MoE and rather use a depthwise\\nscaling method analogous to Tan and Le (2019)\\nwhich is adapted for the LLM architecture. Thus,\\nthere are no additional modules or dynamism as\\nwith MoE, making DUS immediately compatible\\nwith easy-to-use LLM frameworks such as Hug-\\ngingFace (Wolf et al., 2019) with no changes to\\nthe training or inference framework for maximal\\nefficiency. 
Furthermore, DUS is applicable to all\\ntransformer architectures, opening up new gate-\\nways to effectively and efficiently scale-up LLMs\\nin a simple manner. Using DUS, we release SO-\\nLAR 10.7B, an LLM with 10.7 billion parameters,\\nthat outperforms existing models like Llama 2 (Tou-\\nvron et al., 2023) and Mistral 7B (Jiang et al., 2023)\\nin various benchmarks. We have also developed SOLAR 10.7B-Instruct,\\na variant fine-tuned for tasks requiring strict adher-\\nence to complex instructions. It significantly out-\\nperforms the Mixtral-8x7B-Instruct model across\\nvarious evaluation metrics, evidencing an advanced\\nproficiency that exceeds the capabilities of even\\nlarger models in terms of benchmark performance. * Equal Contribution 1 Corresponding Author\\nhttps : / /huggingface.co/upstage/\\nSOLAR-1 0 · 7B-v1 . 0 By releasing SOLAR 10.7B under the Apache\\n2.0 license, we aim to promote collaboration and in-\\nnovation in NLP. This open-source approach allows 2024\\nApr\\n4\\n[cs.CL]\\narxiv:2...117.7.13' metadata={'page': 1, 'type': 'text', 'split': 'page'}\n",
"page_content=\"Step 1-1 Step 1-2\\nOutput Output Output\\nOutput Output Output\\n24 Layers 24Layers\\nMerge\\n8Layers\\n---- 48 Layers\\nCopy\\n8 Layers Continued\\n32Layers 32Layers\\nPretraining\\n24Layers\\n24 Layers Input\\nInput Input Input Input Input\\nStep 1. Depthwise Scaling Step2. Continued Pretraining Figure 1: Depth up-scaling for the case with n = 32, s = 48, and m = 8. Depth up-scaling is achieved through a\\ndual-stage process of depthwise scaling followed by continued pretraining. for wider access and application of these models\\nby researchers and developers globally. 2 Depth Up-Scaling To efficiently scale-up LLMs, we aim to utilize pre-\\ntrained weights of base models to scale up to larger\\nLLMs (Komatsuzaki et al., 2022). While exist-\\ning methods such as Komatsuzaki et al. (2022) use\\nMoE (Shazeer et al., 2017) to scale-up the model ar-\\nchitecture, we opt for a different depthwise scaling\\nstrategy inspired by Tan and Le (2019). We then\\ncontinually pretrain the scaled model as just scaling\\nthe model without further pretraining degrades the\\nperformance. Base model. Any n-layer transformer architec-\\nture can be used but we select the 32-layer Llama\\n2 architecture as our base model. We initialize the\\nLlama 2 architecture with pretrained weights from\\nMistral 7B, as it is one of the top performers com-\\npatible with the Llama 2 architecture. By adopting\\nthe Llama 2 architecture for our base model, we\\naim to leverage the vast pool of community re-\\nsources while introducing novel modifications to\\nfurther enhance its capabilities. Depthwise scaling. From the base model with n\\nlayers, we set the target layer count s for the scaled\\nmodel, which is largely dictated by the available\\nhardware. With the above, the depthwise scaling process\\nis as follows. The base model with n layers is\\nduplicated for subsequent modification. Then, we\\nremove the final m layers from the original model\\nand the initial m layers from its duplicate, thus\\nforming two distinct models with n - m layers.\\nThese two models are concatenated to form a scaled\\nmodel with s = 2· (n-m) layers. Note that n = 32\\nfrom our base model and we set s = 48 considering our hardware constraints and the efficiency of the\\nscaled model, i.e., fitting between 7 and 13 billion\\nparameters. Naturally, this leads to the removal of\\nm = 8 layers. The depthwise scaling process with\\nn = 32, s = 48, and m = 8 is depicted in 'Step 1:\\nDepthwise Scaling' of Fig. 1. We note that a method in the community that also\\n2 'Step 1:\\nscale the model in the same manner as\\nDepthwise Scaling' of Fig. 1 has been concurrently\\ndeveloped. Continued pretraining. The performance of the\\ndepthwise scaled model initially drops below that\\nof the base LLM. Thus, we additionally apply\\nthe continued pretraining step as shown in 'Step\\n2: Continued Pretraining' of Fig. 1. Experimen-\\ntally, we observe rapid performance recovery of\\nthe scaled model during continued pretraining, a\\nphenomenon also observed in Komatsuzaki et al.\\n(2022). We consider that the particular way of\\ndepthwise scaling has isolated the heterogeneity\\nin the scaled model which allowed for this fast\\nperformance recovery. Delving deeper into the heterogeneity of the\\nscaled model, a simpler alternative to depthwise\\nscaling could be to just repeat its layers once more,\\ni.e., from n to 2n layers. 
Then, the 'layer distance',\\nor the difference in the layer indices in the base\\nmodel, is only bigger than 1 where layers n and\\nn + 1 are connected, i.e., at the seam. However, this results in maximum layer distance\\nat the seam, which may be too significant of a\\ndiscrepancy for continued pretraining to quickly\\nresolve. Instead, depthwise scaling sacrifices the\\n2m middle layers, thereby reducing the discrep-\\nancy at the seam and making it easier for continued 2https : / /huggingface · co/Undi 95/\\nMistral-11B-v0 · 1\" metadata={'page': 2, 'type': 'text', 'split': 'page'}\n",
"page_content=\"Properties Instruction Training Datasets Alignment\\n Alpaca-GPT4 OpenOrca Synth. Math-Instruct Orca DPO Pairs Ultrafeedback Cleaned Synth. Math-Alignment\\n Total # Samples 52K 2.91M 126K 12.9K 60.8K 126K\\n Maximum # Samples Used 52K 100K 52K 12.9K 60.8K 20.1K\\n Open Source O O X O O Table 1: Training datasets used for the instruction and alignment tuning stages, respectively. For the instruction\\ntuning process, we utilized the Alpaca-GPT4 (Peng et al., 2023), OpenOrca (Mukherjee et al., 2023), and Synth.\\nMath-Instruct datasets, while for the alignment tuning, we employed the Orca DPO Pairs (Intel, 2023), Ultrafeedback\\nCleaned (Cui et al., 2023; Ivison et al., 2023), and Synth. Math-Alignment datasets. The 'Total # Samples indicates\\nthe total number of samples in the entire dataset. The 'Maximum # Samples Used' indicates the actual maximum\\nnumber of samples that were used in training, which could be lower than the total number of samples in a given\\ndataset. 'Open Source' indicates whether the dataset is open-sourced. pretraining to quickly recover performance. We\\nattribute the success of DUS to reducing such dis-\\ncrepancies in both the depthwise scaling and the\\ncontinued pretraining steps. We also hypothesize\\nthat other methods of depthwise scaling could also\\nwork for DUS, as long as the discrepancy in the\\nscaled model is sufficiently contained before the\\ncontinued pretraining step. Comparison to other up-scaling methods. Un-\\nlike Komatsuzaki et al. (2022), depthwise scaled\\nmodels do not require additional modules like gat-\\ning networks or dynamic expert selection. Conse-\\nquently, scaled models in DUS do not necessitate\\na distinct training framework for optimal training\\nefficiency, nor do they require specialized CUDA\\nkernels for fast inference. A DUS model can seam-\\nlessly integrate into existing training and inference\\nframeworks while maintaining high efficiency. 3 Training Details After DUS, including continued pretraining, we\\nperform fine-tuning of SOLAR 10.7B in two stages:\\n1) instruction tuning and 2) alignment tuning. Instruction tuning. In the instruction tuning\\nstage, the model is trained to follow instructions in\\na QA format (Zhang et al., 2023). We mostly use\\nopen-source datasets but also synthesize a math QA\\ndataset to enhance the model's mathematical capa-\\nbilities. A rundown of how we crafted the dataset is\\nas follows. First, seed math data are collected from\\nthe Math (Hendrycks et al., 2021) dataset only, to\\navoid contamination with commonly used bench-\\nmark datasets such as GSM8K (Cobbe et al., 2021).\\nThen, using a process similar to MetaMath (Yu\\net al., 2023), we rephrase the questions and an-\\nswers of the seed math data. We use the resulting\\nrephrased question-answer pairs as a QA dataset and call it 'Synth. Math-Instruct*. Alignment tuning. In the alignment tuning stage,\\nthe instruction-tuned model is further fine-tuned\\nto be more aligned with human or strong AI\\n(e.g., GPT4 (OpenAI, 2023)) preferences using\\nsDPO (Kim et al., 2024a), an improved version\\nof direct preference optimization (DPO) (Rafailov\\net al., 2023). Similar to the instruction tuning stage,\\nwe use mostly open-source datasets but also syn-\\nthesize a math-focused alignment dataset utilizing\\nthe 'Synth. Math-Instruct' dataset mentioned in the\\ninstruction tuning stage. The alignment data synthesis process is as\\nfollows. 
We take advantage of the fact that\\nthe rephrased question-answer pairs in Synth.\\nMath-Instruct data are beneficial in enhancing the\\nmodel's mathematical capabilities (see Sec. 4.3.1).\\nThus, we speculate that the rephrased answer to the\\nrephrased question is a better answer than the orig-\\ninal answer, possibly due to the interim rephrasing\\nstep. Consequently, we set the rephrased question\\nas the prompt and use the rephrased answer as the\\nchosen response and the original answer as the re-\\njected response and create the {prompt, chosen,\\nrejected} DPO tuple. We aggregate the tuples from\\nthe rephrased question-answer pairs and call the\\nresulting dataset 'Synth. Math-Alignment*. 4 Results 4.1 Experimental Details Training datasets. We present details regarding\\nour training datasets for the instruction and align-\\nment tuning stages in Tab. 1. We do not always\\nuse the entire dataset and instead subsample a set\\namount. Note that most of our training data is\\nopen-source, and the undisclosed datasets can be\\nsubstituted for open-source alternatives such as the\" metadata={'page': 3, 'type': 'text', 'split': 'page'}\n"
]
}
],
"source": [
"from langchain_upstage import UpstageLayoutAnalysisLoader\n",
"\n",
"file_path = \"/PATH/TO/YOUR/FILE.pdf\"\n",
"layzer = UpstageLayoutAnalysisLoader(file_path, split=\"page\")\n",
"\n",
"# For improved memory efficiency, consider using the lazy_load method to load documents page by page.\n",
"docs = layzer.load() # or layzer.lazy_load()\n",
"\n",
"for doc in docs[:3]:\n",
" print(doc)"
]
}
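The comment in the cell above suggests `lazy_load` for memory efficiency; here is a short sketch of that streaming variant, processing one page at a time instead of materializing every Document. It reuses `file_path` and the loader class from the cell above, and the `page` metadata key visible in the printed output.

```python
# Sketch: stream pages one at a time with lazy_load instead of load().
layzer = UpstageLayoutAnalysisLoader(file_path, split="page")
for i, doc in enumerate(layzer.lazy_load()):
    print(f"page {doc.metadata.get('page')}: {len(doc.page_content)} chars")
    if i >= 2:  # stop early; lazy_load yields pages on demand
        break
```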
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -9,7 +9,7 @@
"\n",
"This covers how to use `WebBaseLoader` to load all text from `HTML` webpages into a document format that we can use downstream. For more custom logic for loading webpages look at some child class examples such as `IMSDbLoader`, `AZLyricsLoader`, and `CollegeConfidentialLoader`. \n",
"\n",
"If you don't want to worry about website crawling, bypassing JS-blocking sites, and data cleaning, consider using `FireCrawlLoader`.\n"
"If you don't want to worry about website crawling, bypassing JS-blocking sites, and data cleaning, consider using `FireCrawlLoader` or the faster option `SpiderLoader`.\n"
]
},
{
@@ -16,7 +16,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "9ec8a3b3",
"metadata": {
"tags": []
@@ -28,14 +28,15 @@
},
{
"cell_type": "code",
"outputs": [],
"source": [
"loader = YuqueLoader(access_token=\"<your_personal_access_token>\")"
],
"execution_count": 2,
"id": "2ea958f0327ed6e8",
"metadata": {
"collapsed": false
},
"id": "2ea958f0327ed6e8"
"outputs": [],
"source": [
"loader = YuqueLoader(access_token=\"<your_personal_access_token>\")"
]
},
{
"cell_type": "code",
@@ -69,7 +70,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
"version": "3.11.4"
}
},
"nbformat": 4,
@@ -82,7 +82,7 @@
")\n",
"\n",
"query = \"What is the plan for the economy?\"\n",
"docs = retriever.get_relevant_documents(query)\n",
"docs = retriever.invoke(query)\n",
"pretty_print_docs(docs)"
]
},
@@ -162,9 +162,7 @@
" base_compressor=compressor, base_retriever=retriever\n",
")\n",
"\n",
"compressed_docs = compression_retriever.get_relevant_documents(\n",
" \"What is the plan for the economy?\"\n",
")\n",
"compressed_docs = compression_retriever.invoke(\"What is the plan for the economy?\")\n",
"pretty_print_docs(compressed_docs)"
]
},
@@ -0,0 +1,793 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "mZaeRH_SjJWK"
},
"source": [
"# Google Cloud Vertex AI Reranker\n",
"\n",
"> The [Vertex Search Ranking API](https://cloud.google.com/generative-ai-app-builder/docs/ranking) is one of the standalone APIs in [Vertex AI Agent Builder](https://cloud.google.com/generative-ai-app-builder/docs/builder-apis). It takes a list of documents and reranks those documents based on how relevant the documents are to a query. Compared to embeddings, which look only at the semantic similarity of a document and a query, the ranking API can give you precise scores for how well a document answers a given query. The ranking API can be used to improve the quality of search results after retrieving an initial set of candidate documents.\n",
"\n",
">The ranking API is stateless so there's no need to index documents before calling the API. All you need to do is pass in the query and documents. This makes the API well suited for reranking documents from any document retriever.\n",
"\n",
">For more information, see [Rank and rerank documents](https://cloud.google.com/generative-ai-app-builder/docs/ranking)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "w51yJNBAirPZ"
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-community langchain-google-community langchain-google-community[vertexaisearch] langchain-google-vertexai langchain-chroma langchain-text-splitters"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5sN2qvW0Wxwj"
},
"source": [
"### Setup"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"id": "axookyKSnl3G"
},
"outputs": [],
"source": [
"PROJECT_ID = \"\"\n",
"REGION = \"\"\n",
"RANKING_LOCATION_ID = \"global\" # @param {type:\"string\"}\n",
"\n",
"# Initialize GCP project for Vertex AI\n",
"from google.cloud import aiplatform\n",
"\n",
"aiplatform.init(project=PROJECT_ID, location=REGION)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7xie5peQW2Lf"
},
"source": [
"### Load and Prepare data\n",
"\n",
"For this example, we will be using the [Google Wiki page](https://en.wikipedia.org/wiki/Google) to demonstrate how the Vertex Ranking API works.\n",
"\n",
"We use a standard pipeline of `load -> split -> embed data`.\n",
"\n",
"The embeddings are created using the [Vertex Embeddings API](https://cloud.google.com/vertex-ai/generative-ai/docs/embeddings/get-text-embeddings#supported_models) model - `textembedding-gecko@003`"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "3yY5reMbkbFS",
"outputId": "e124299b-0fa2-4acd-aaec-d5361f008d97"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Your 1 documents have been split into 266 chunks\n"
]
}
],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_google_vertexai import VertexAIEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"vectordb = None\n",
"\n",
"# Load wiki page\n",
"loader = WebBaseLoader(\"https://en.wikipedia.org/wiki/Google\")\n",
"data = loader.load()\n",
"\n",
"# Split doc into chunks\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=5)\n",
"splits = text_splitter.split_documents(data)\n",
"\n",
"print(f\"Your {len(data)} documents have been split into {len(splits)} chunks\")\n",
"\n",
"if vectordb is not None: # delete existing vectordb if it already exists\n",
" vectordb.delete_collection()\n",
"\n",
"embedding = VertexAIEmbeddings(model_name=\"textembedding-gecko@003\")\n",
"vectordb = Chroma.from_documents(documents=splits, embedding=embedding)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"id": "jNmGwvrqnFF1"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"from langchain.retrievers.contextual_compression import ContextualCompressionRetriever\n",
"from langchain_google_community.vertex_rank import VertexAIRank\n",
"\n",
"# Instantiate the VertexAIReranker with the SDK manager\n",
"reranker = VertexAIRank(\n",
" project_id=PROJECT_ID,\n",
" location_id=RANKING_LOCATION_ID,\n",
" ranking_config=\"default_ranking_config\",\n",
" title_field=\"source\",\n",
" top_n=5,\n",
")\n",
"\n",
"basic_retriever = vectordb.as_retriever(search_kwargs={\"k\": 5}) # fetch top 5 documents\n",
"\n",
"# Create the ContextualCompressionRetriever with the VertexAIRanker as a Reranker\n",
"retriever_with_reranker = ContextualCompressionRetriever(\n",
" base_compressor=reranker, base_retriever=basic_retriever\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "uMOPl7ji_nU_"
},
"source": [
"### Testing out the Vertex Ranking API\n",
"\n",
"Let's query both the `basic_retriever` and `retriever_with_reranker` with the same query and compare the retrieved documents.\n",
"\n",
"The reranker takes the documents retrieved by the `basic_retriever` and passes them to the Ranking API.\n",
"\n",
"The ranking API is used to improve the quality of the ranking and determine a score that indicates the relevance of each record to the query.\n",
"\n",
"You can see the difference between the Unranked and the Ranked Documents. The Ranking API moves the most semantically relevant documents to the top of the context window of the LLM, thus helping it form a better answer with reasoning."
]
},
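As a sketch of the comparison described above: invoke both retrievers with the same query and line the results up side by side. Everything here uses names already defined in this notebook (`pd`, `basic_retriever`, `retriever_with_reranker`, `comparison_df`); the exact query string and the 120-character truncation are just illustrative choices.

```python
# Sketch: compare unranked vs. reranked results for one query.
query = "how did Google get its name?"

unranked = basic_retriever.invoke(query)
reranked = retriever_with_reranker.invoke(query)

# Both retrievers return 5 documents here (k=5 and top_n=5 above),
# so the columns line up row for row.
comparison_df = pd.DataFrame(
    {
        "Unranked Documents": [d.page_content[:120] for d in unranked],
        "Ranked Documents": [d.page_content[:120] for d in reranked],
    }
)
print(comparison_df)
```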
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 484
},
"id": "sJDkepoYoc0t",
"outputId": "eac41585-3d53-4dd9-da16-51ec47eedfec"
},
"outputs": [
{
"data": {
"application/vnd.google.colaboratory.intrinsic+json": {
"summary": "{\n \"name\": \"comparison_df\",\n \"rows\": 5,\n \"fields\": [\n {\n \"column\": \"Unranked Documents\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"Eventually, they changed the name to Google; the name of the search engine was a misspelling of the word googol,[21][36][37] a very large number written 10100 (1 followed by 100 zeros), picked to signify that the search engine was intended to provide large quantities of information.[38]\",\n \"^ Swant, Marty. \\\"The World's Valuable Brands\\\". Forbes. Archived from the original on October 18, 2020. Retrieved January 19, 2022.\\n\\n^ \\\"Best Global Brands\\\". Interbrand. Archived from the original on February 1, 2022. Retrieved March 7, 2011.\\n\\n^ a b c d \\\"How we started and where we are today \\u2013 Google\\\". about.google. Archived from the original on April 22, 2020. Retrieved April 24, 2021.\\n\\n^ Brezina, Corona (2013). Sergey Brin, Larry Page, Eric Schmidt, and Google (1st\\u00a0ed.). New York: Rosen Publishing Group. p.\\u00a018. ISBN\\u00a0978-1-4488-6911-4. LCCN\\u00a02011039480.\\n\\n^ a b c \\\"Our history in depth\\\". Google Company. Archived from the original on April 1, 2012. Retrieved July 15, 2017.\",\n \"The name \\\"Google\\\" originated from a misspelling of \\\"googol\\\",[211][212] which refers to the number represented by a 1 followed by one-hundred zeros. Page and Brin write in their original paper on PageRank:[33] \\\"We chose our system name, Google, because it is a common spelling of googol, or 10100[,] and fits well with our goal of building very large-scale search engines.\\\" Having found its way increasingly into everyday language, the verb \\\"google\\\" was added to the Merriam Webster Collegiate Dictionary and the Oxford English Dictionary in 2006, meaning \\\"to use the Google search engine to obtain information on the Internet.\\\"[213][214] Google's mission statement, from the outset, was \\\"to organize the world's information and make it universally accessible and useful\\\",[215] and its unofficial\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Ranked Documents\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"Eventually, they changed the name to Google; the name of the search engine was a misspelling of the word googol,[21][36][37] a very large number written 10100 (1 followed by 100 zeros), picked to signify that the search engine was intended to provide large quantities of information.[38]\",\n \"^ Swant, Marty. \\\"The World's Valuable Brands\\\". Forbes. Archived from the original on October 18, 2020. Retrieved January 19, 2022.\\n\\n^ \\\"Best Global Brands\\\". Interbrand. Archived from the original on February 1, 2022. Retrieved March 7, 2011.\\n\\n^ a b c d \\\"How we started and where we are today \\u2013 Google\\\". about.google. Archived from the original on April 22, 2020. Retrieved April 24, 2021.\\n\\n^ Brezina, Corona (2013). Sergey Brin, Larry Page, Eric Schmidt, and Google (1st\\u00a0ed.). New York: Rosen Publishing Group. p.\\u00a018. ISBN\\u00a0978-1-4488-6911-4. LCCN\\u00a02011039480.\\n\\n^ a b c \\\"Our history in depth\\\". Google Company. Archived from the original on April 1, 2012. Retrieved July 15, 2017.\",\n \"^ Meijer, Bart (January 3, 2019). \\\"Google shifted $23 billion to tax haven Bermuda in 2017: filing\\\". Reuters. Archived from the original on January 3, 2019. Retrieved January 3, 2019. 
Google moved 19.9 billion euros ($22.7 billion) through a Dutch shell company to Bermuda in 2017, as part of an arrangement that allows it to reduce its foreign tax bill\\n\\n^ Hamburger, Tom; Gold, Matea (April 13, 2014). \\\"Google, once disdainful of lobbying, now a master of Washington influence\\\". The Washington Post. Archived from the original on October 27, 2017. Retrieved August 22, 2017.\\n\\n^ Koller, David (January 2004). \\\"Origin of the name, \\\"Google.\\\"\\\". Stanford University. Archived from the original on June 27, 2012. Retrieved May 28, 2006.\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}",
"type": "dataframe",
"variable_name": "comparison_df"
},
"text/html": [
"\n",
" <div id=\"df-43c4f5f2-c31d-4664-85dd-60cad39bd5fa\" class=\"colab-df-container\">\n",
" <div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Unranked Documents</th>\n",
" <th>Ranked Documents</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>^ a b Brin, Sergey; Page, Lawrence (1998). \"The anatomy of a large-scale hypertextual Web search engine\" (PDF). Computer Networks and ISDN Systems. 30 (1–7): 107–117. CiteSeerX 10.1.1.115.5930. doi:10.1016/S0169-7552(98)00110-X. ISSN 0169-7552. S2CID 7587743. Archived (PDF) from the original on September 27, 2015. Retrieved April 7, 2019.\\n\\n^ \"About: RankDex\". Archived from the original on January 20, 2012. Retrieved September 29, 2010., RankDex\\n\\n^ \"Method for node ranking in a linked database\". Google Patents. Archived from the original on October 15, 2015. Retrieved October 19, 2015.\\n\\n^ Koller, David (January 2004). \"Origin of the name \"Google\"\". Stanford University. Archived from the original on June 27, 2012.</td>\n",
|
||||
" <td>The name \"Google\" originated from a misspelling of \"googol\",[211][212] which refers to the number represented by a 1 followed by one-hundred zeros. Page and Brin write in their original paper on PageRank:[33] \"We chose our system name, Google, because it is a common spelling of googol, or 10100[,] and fits well with our goal of building very large-scale search engines.\" Having found its way increasingly into everyday language, the verb \"google\" was added to the Merriam Webster Collegiate Dictionary and the Oxford English Dictionary in 2006, meaning \"to use the Google search engine to obtain information on the Internet.\"[213][214] Google's mission statement, from the outset, was \"to organize the world's information and make it universally accessible and useful\",[215] and its unofficial</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>1</th>\n",
|
||||
" <td>Eventually, they changed the name to Google; the name of the search engine was a misspelling of the word googol,[21][36][37] a very large number written 10100 (1 followed by 100 zeros), picked to signify that the search engine was intended to provide large quantities of information.[38]</td>\n",
|
||||
" <td>Eventually, they changed the name to Google; the name of the search engine was a misspelling of the word googol,[21][36][37] a very large number written 10100 (1 followed by 100 zeros), picked to signify that the search engine was intended to provide large quantities of information.[38]</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2</th>\n",
|
||||
" <td>The name \"Google\" originated from a misspelling of \"googol\",[211][212] which refers to the number represented by a 1 followed by one-hundred zeros. Page and Brin write in their original paper on PageRank:[33] \"We chose our system name, Google, because it is a common spelling of googol, or 10100[,] and fits well with our goal of building very large-scale search engines.\" Having found its way increasingly into everyday language, the verb \"google\" was added to the Merriam Webster Collegiate Dictionary and the Oxford English Dictionary in 2006, meaning \"to use the Google search engine to obtain information on the Internet.\"[213][214] Google's mission statement, from the outset, was \"to organize the world's information and make it universally accessible and useful\",[215] and its unofficial</td>\n",
|
||||
" <td>^ Meijer, Bart (January 3, 2019). \"Google shifted $23 billion to tax haven Bermuda in 2017: filing\". Reuters. Archived from the original on January 3, 2019. Retrieved January 3, 2019. Google moved 19.9 billion euros ($22.7 billion) through a Dutch shell company to Bermuda in 2017, as part of an arrangement that allows it to reduce its foreign tax bill\\n\\n^ Hamburger, Tom; Gold, Matea (April 13, 2014). \"Google, once disdainful of lobbying, now a master of Washington influence\". The Washington Post. Archived from the original on October 27, 2017. Retrieved August 22, 2017.\\n\\n^ Koller, David (January 2004). \"Origin of the name, \"Google.\"\". Stanford University. Archived from the original on June 27, 2012. Retrieved May 28, 2006.</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>3</th>\n",
|
||||
" <td>^ Meijer, Bart (January 3, 2019). \"Google shifted $23 billion to tax haven Bermuda in 2017: filing\". Reuters. Archived from the original on January 3, 2019. Retrieved January 3, 2019. Google moved 19.9 billion euros ($22.7 billion) through a Dutch shell company to Bermuda in 2017, as part of an arrangement that allows it to reduce its foreign tax bill\\n\\n^ Hamburger, Tom; Gold, Matea (April 13, 2014). \"Google, once disdainful of lobbying, now a master of Washington influence\". The Washington Post. Archived from the original on October 27, 2017. Retrieved August 22, 2017.\\n\\n^ Koller, David (January 2004). \"Origin of the name, \"Google.\"\". Stanford University. Archived from the original on June 27, 2012. Retrieved May 28, 2006.</td>\n",
|
||||
" <td>^ a b Brin, Sergey; Page, Lawrence (1998). \"The anatomy of a large-scale hypertextual Web search engine\" (PDF). Computer Networks and ISDN Systems. 30 (1–7): 107–117. CiteSeerX 10.1.1.115.5930. doi:10.1016/S0169-7552(98)00110-X. ISSN 0169-7552. S2CID 7587743. Archived (PDF) from the original on September 27, 2015. Retrieved April 7, 2019.\\n\\n^ \"About: RankDex\". Archived from the original on January 20, 2012. Retrieved September 29, 2010., RankDex\\n\\n^ \"Method for node ranking in a linked database\". Google Patents. Archived from the original on October 15, 2015. Retrieved October 19, 2015.\\n\\n^ Koller, David (January 2004). \"Origin of the name \"Google\"\". Stanford University. Archived from the original on June 27, 2012.</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>4</th>\n",
|
||||
" <td>^ Swant, Marty. \"The World's Valuable Brands\". Forbes. Archived from the original on October 18, 2020. Retrieved January 19, 2022.\\n\\n^ \"Best Global Brands\". Interbrand. Archived from the original on February 1, 2022. Retrieved March 7, 2011.\\n\\n^ a b c d \"How we started and where we are today – Google\". about.google. Archived from the original on April 22, 2020. Retrieved April 24, 2021.\\n\\n^ Brezina, Corona (2013). Sergey Brin, Larry Page, Eric Schmidt, and Google (1st ed.). New York: Rosen Publishing Group. p. 18. ISBN 978-1-4488-6911-4. LCCN 2011039480.\\n\\n^ a b c \"Our history in depth\". Google Company. Archived from the original on April 1, 2012. Retrieved July 15, 2017.</td>\n",
|
||||
" <td>^ Swant, Marty. \"The World's Valuable Brands\". Forbes. Archived from the original on October 18, 2020. Retrieved January 19, 2022.\\n\\n^ \"Best Global Brands\". Interbrand. Archived from the original on February 1, 2022. Retrieved March 7, 2011.\\n\\n^ a b c d \"How we started and where we are today – Google\". about.google. Archived from the original on April 22, 2020. Retrieved April 24, 2021.\\n\\n^ Brezina, Corona (2013). Sergey Brin, Larry Page, Eric Schmidt, and Google (1st ed.). New York: Rosen Publishing Group. p. 18. ISBN 978-1-4488-6911-4. LCCN 2011039480.\\n\\n^ a b c \"Our history in depth\". Google Company. Archived from the original on April 1, 2012. Retrieved July 15, 2017.</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>\n",
|
||||
" <div class=\"colab-df-buttons\">\n",
|
||||
"\n",
|
||||
" <div class=\"colab-df-container\">\n",
|
||||
" <button class=\"colab-df-convert\" onclick=\"convertToInteractive('df-43c4f5f2-c31d-4664-85dd-60cad39bd5fa')\"\n",
|
||||
" title=\"Convert this dataframe to an interactive table.\"\n",
|
||||
" style=\"display:none;\">\n",
|
||||
"\n",
|
||||
" <svg xmlns=\"http://www.w3.org/2000/svg\" height=\"24px\" viewBox=\"0 -960 960 960\">\n",
|
||||
" <path d=\"M120-120v-720h720v720H120Zm60-500h600v-160H180v160Zm220 220h160v-160H400v160Zm0 220h160v-160H400v160ZM180-400h160v-160H180v160Zm440 0h160v-160H620v160ZM180-180h160v-160H180v160Zm440 0h160v-160H620v160Z\"/>\n",
|
||||
" </svg>\n",
|
||||
" </button>\n",
|
||||
"\n",
|
||||
" <style>\n",
|
||||
" .colab-df-container {\n",
|
||||
" display:flex;\n",
|
||||
" gap: 12px;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-convert {\n",
|
||||
" background-color: #E8F0FE;\n",
|
||||
" border: none;\n",
|
||||
" border-radius: 50%;\n",
|
||||
" cursor: pointer;\n",
|
||||
" display: none;\n",
|
||||
" fill: #1967D2;\n",
|
||||
" height: 32px;\n",
|
||||
" padding: 0 0 0 0;\n",
|
||||
" width: 32px;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-convert:hover {\n",
|
||||
" background-color: #E2EBFA;\n",
|
||||
" box-shadow: 0px 1px 2px rgba(60, 64, 67, 0.3), 0px 1px 3px 1px rgba(60, 64, 67, 0.15);\n",
|
||||
" fill: #174EA6;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-buttons div {\n",
|
||||
" margin-bottom: 4px;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" [theme=dark] .colab-df-convert {\n",
|
||||
" background-color: #3B4455;\n",
|
||||
" fill: #D2E3FC;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" [theme=dark] .colab-df-convert:hover {\n",
|
||||
" background-color: #434B5C;\n",
|
||||
" box-shadow: 0px 1px 3px 1px rgba(0, 0, 0, 0.15);\n",
|
||||
" filter: drop-shadow(0px 1px 2px rgba(0, 0, 0, 0.3));\n",
|
||||
" fill: #FFFFFF;\n",
|
||||
" }\n",
|
||||
" </style>\n",
|
||||
"\n",
|
||||
" <script>\n",
|
||||
" const buttonEl =\n",
|
||||
" document.querySelector('#df-43c4f5f2-c31d-4664-85dd-60cad39bd5fa button.colab-df-convert');\n",
|
||||
" buttonEl.style.display =\n",
|
||||
" google.colab.kernel.accessAllowed ? 'block' : 'none';\n",
|
||||
"\n",
|
||||
" async function convertToInteractive(key) {\n",
|
||||
" const element = document.querySelector('#df-43c4f5f2-c31d-4664-85dd-60cad39bd5fa');\n",
|
||||
" const dataTable =\n",
|
||||
" await google.colab.kernel.invokeFunction('convertToInteractive',\n",
|
||||
" [key], {});\n",
|
||||
" if (!dataTable) return;\n",
|
||||
"\n",
|
||||
" const docLinkHtml = 'Like what you see? Visit the ' +\n",
|
||||
" '<a target=\"_blank\" href=https://colab.research.google.com/notebooks/data_table.ipynb>data table notebook</a>'\n",
|
||||
" + ' to learn more about interactive tables.';\n",
|
||||
" element.innerHTML = '';\n",
|
||||
" dataTable['output_type'] = 'display_data';\n",
|
||||
" await google.colab.output.renderOutput(dataTable, element);\n",
|
||||
" const docLink = document.createElement('div');\n",
|
||||
" docLink.innerHTML = docLinkHtml;\n",
|
||||
" element.appendChild(docLink);\n",
|
||||
" }\n",
|
||||
" </script>\n",
|
||||
" </div>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"<div id=\"df-fff80078-f146-44f5-9eff-d91c9305c276\">\n",
|
||||
" <button class=\"colab-df-quickchart\" onclick=\"quickchart('df-fff80078-f146-44f5-9eff-d91c9305c276')\"\n",
|
||||
" title=\"Suggest charts\"\n",
|
||||
" style=\"display:none;\">\n",
|
||||
"\n",
|
||||
"<svg xmlns=\"http://www.w3.org/2000/svg\" height=\"24px\"viewBox=\"0 0 24 24\"\n",
|
||||
" width=\"24px\">\n",
|
||||
" <g>\n",
|
||||
" <path d=\"M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zM9 17H7v-7h2v7zm4 0h-2V7h2v10zm4 0h-2v-4h2v4z\"/>\n",
|
||||
" </g>\n",
|
||||
"</svg>\n",
|
||||
" </button>\n",
|
||||
"\n",
|
||||
"<style>\n",
|
||||
" .colab-df-quickchart {\n",
|
||||
" --bg-color: #E8F0FE;\n",
|
||||
" --fill-color: #1967D2;\n",
|
||||
" --hover-bg-color: #E2EBFA;\n",
|
||||
" --hover-fill-color: #174EA6;\n",
|
||||
" --disabled-fill-color: #AAA;\n",
|
||||
" --disabled-bg-color: #DDD;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" [theme=dark] .colab-df-quickchart {\n",
|
||||
" --bg-color: #3B4455;\n",
|
||||
" --fill-color: #D2E3FC;\n",
|
||||
" --hover-bg-color: #434B5C;\n",
|
||||
" --hover-fill-color: #FFFFFF;\n",
|
||||
" --disabled-bg-color: #3B4455;\n",
|
||||
" --disabled-fill-color: #666;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-quickchart {\n",
|
||||
" background-color: var(--bg-color);\n",
|
||||
" border: none;\n",
|
||||
" border-radius: 50%;\n",
|
||||
" cursor: pointer;\n",
|
||||
" display: none;\n",
|
||||
" fill: var(--fill-color);\n",
|
||||
" height: 32px;\n",
|
||||
" padding: 0;\n",
|
||||
" width: 32px;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-quickchart:hover {\n",
|
||||
" background-color: var(--hover-bg-color);\n",
|
||||
" box-shadow: 0 1px 2px rgba(60, 64, 67, 0.3), 0 1px 3px 1px rgba(60, 64, 67, 0.15);\n",
|
||||
" fill: var(--button-hover-fill-color);\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-quickchart-complete:disabled,\n",
|
||||
" .colab-df-quickchart-complete:disabled:hover {\n",
|
||||
" background-color: var(--disabled-bg-color);\n",
|
||||
" fill: var(--disabled-fill-color);\n",
|
||||
" box-shadow: none;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-spinner {\n",
|
||||
" border: 2px solid var(--fill-color);\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-bottom-color: var(--fill-color);\n",
|
||||
" animation:\n",
|
||||
" spin 1s steps(1) infinite;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" @keyframes spin {\n",
|
||||
" 0% {\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-bottom-color: var(--fill-color);\n",
|
||||
" border-left-color: var(--fill-color);\n",
|
||||
" }\n",
|
||||
" 20% {\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-left-color: var(--fill-color);\n",
|
||||
" border-top-color: var(--fill-color);\n",
|
||||
" }\n",
|
||||
" 30% {\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-left-color: var(--fill-color);\n",
|
||||
" border-top-color: var(--fill-color);\n",
|
||||
" border-right-color: var(--fill-color);\n",
|
||||
" }\n",
|
||||
" 40% {\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-right-color: var(--fill-color);\n",
|
||||
" border-top-color: var(--fill-color);\n",
|
||||
" }\n",
|
||||
" 60% {\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-right-color: var(--fill-color);\n",
|
||||
" }\n",
|
||||
" 80% {\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-right-color: var(--fill-color);\n",
|
||||
" border-bottom-color: var(--fill-color);\n",
|
||||
" }\n",
|
||||
" 90% {\n",
|
||||
" border-color: transparent;\n",
|
||||
" border-bottom-color: var(--fill-color);\n",
|
||||
" }\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"\n",
|
||||
" <script>\n",
|
||||
" async function quickchart(key) {\n",
|
||||
" const quickchartButtonEl =\n",
|
||||
" document.querySelector('#' + key + ' button');\n",
|
||||
" quickchartButtonEl.disabled = true; // To prevent multiple clicks.\n",
|
||||
" quickchartButtonEl.classList.add('colab-df-spinner');\n",
|
||||
" try {\n",
|
||||
" const charts = await google.colab.kernel.invokeFunction(\n",
|
||||
" 'suggestCharts', [key], {});\n",
|
||||
" } catch (error) {\n",
|
||||
" console.error('Error during call to suggestCharts:', error);\n",
|
||||
" }\n",
|
||||
" quickchartButtonEl.classList.remove('colab-df-spinner');\n",
|
||||
" quickchartButtonEl.classList.add('colab-df-quickchart-complete');\n",
|
||||
" }\n",
|
||||
" (() => {\n",
|
||||
" let quickchartButtonEl =\n",
|
||||
" document.querySelector('#df-fff80078-f146-44f5-9eff-d91c9305c276 button');\n",
|
||||
" quickchartButtonEl.style.display =\n",
|
||||
" google.colab.kernel.accessAllowed ? 'block' : 'none';\n",
|
||||
" })();\n",
|
||||
" </script>\n",
|
||||
"</div>\n",
|
||||
"\n",
|
||||
" <div id=\"id_7648ee4a-f747-429c-820f-e03d3c59f765\">\n",
|
||||
" <style>\n",
|
||||
" .colab-df-generate {\n",
|
||||
" background-color: #E8F0FE;\n",
|
||||
" border: none;\n",
|
||||
" border-radius: 50%;\n",
|
||||
" cursor: pointer;\n",
|
||||
" display: none;\n",
|
||||
" fill: #1967D2;\n",
|
||||
" height: 32px;\n",
|
||||
" padding: 0 0 0 0;\n",
|
||||
" width: 32px;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .colab-df-generate:hover {\n",
|
||||
" background-color: #E2EBFA;\n",
|
||||
" box-shadow: 0px 1px 2px rgba(60, 64, 67, 0.3), 0px 1px 3px 1px rgba(60, 64, 67, 0.15);\n",
|
||||
" fill: #174EA6;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" [theme=dark] .colab-df-generate {\n",
|
||||
" background-color: #3B4455;\n",
|
||||
" fill: #D2E3FC;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" [theme=dark] .colab-df-generate:hover {\n",
|
||||
" background-color: #434B5C;\n",
|
||||
" box-shadow: 0px 1px 3px 1px rgba(0, 0, 0, 0.15);\n",
|
||||
" filter: drop-shadow(0px 1px 2px rgba(0, 0, 0, 0.3));\n",
|
||||
" fill: #FFFFFF;\n",
|
||||
" }\n",
|
||||
" </style>\n",
|
||||
" <button class=\"colab-df-generate\" onclick=\"generateWithVariable('comparison_df')\"\n",
|
||||
" title=\"Generate code using this dataframe.\"\n",
|
||||
" style=\"display:none;\">\n",
|
||||
"\n",
|
||||
" <svg xmlns=\"http://www.w3.org/2000/svg\" height=\"24px\"viewBox=\"0 0 24 24\"\n",
|
||||
" width=\"24px\">\n",
|
||||
" <path d=\"M7,19H8.4L18.45,9,17,7.55,7,17.6ZM5,21V16.75L18.45,3.32a2,2,0,0,1,2.83,0l1.4,1.43a1.91,1.91,0,0,1,.58,1.4,1.91,1.91,0,0,1-.58,1.4L9.25,21ZM18.45,9,17,7.55Zm-12,3A5.31,5.31,0,0,0,4.9,8.1,5.31,5.31,0,0,0,1,6.5,5.31,5.31,0,0,0,4.9,4.9,5.31,5.31,0,0,0,6.5,1,5.31,5.31,0,0,0,8.1,4.9,5.31,5.31,0,0,0,12,6.5,5.46,5.46,0,0,0,6.5,12Z\"/>\n",
|
||||
" </svg>\n",
|
||||
" </button>\n",
|
||||
" <script>\n",
|
||||
" (() => {\n",
|
||||
" const buttonEl =\n",
|
||||
" document.querySelector('#id_7648ee4a-f747-429c-820f-e03d3c59f765 button.colab-df-generate');\n",
|
||||
" buttonEl.style.display =\n",
|
||||
" google.colab.kernel.accessAllowed ? 'block' : 'none';\n",
|
||||
"\n",
|
||||
" buttonEl.onclick = () => {\n",
|
||||
" google.colab.notebook.generateWithVariable('comparison_df');\n",
|
||||
" }\n",
|
||||
" })();\n",
|
||||
" </script>\n",
|
||||
" </div>\n",
|
||||
"\n",
|
||||
" </div>\n",
|
||||
" </div>\n"
|
||||
],
|
||||
"text/plain": [
|
||||
" Unranked Documents \\\n",
|
||||
"0 ^ a b Brin, Sergey; Page, Lawrence (1998). \"The anatomy of a large-scale hypertextual Web search engine\" (PDF). Computer Networks and ISDN Systems. 30 (1–7): 107–117. CiteSeerX 10.1.1.115.5930. doi:10.1016/S0169-7552(98)00110-X. ISSN 0169-7552. S2CID 7587743. Archived (PDF) from the original on September 27, 2015. Retrieved April 7, 2019.\\n\\n^ \"About: RankDex\". Archived from the original on January 20, 2012. Retrieved September 29, 2010., RankDex\\n\\n^ \"Method for node ranking in a linked database\". Google Patents. Archived from the original on October 15, 2015. Retrieved October 19, 2015.\\n\\n^ Koller, David (January 2004). \"Origin of the name \"Google\"\". Stanford University. Archived from the original on June 27, 2012. \n",
|
||||
"1 Eventually, they changed the name to Google; the name of the search engine was a misspelling of the word googol,[21][36][37] a very large number written 10100 (1 followed by 100 zeros), picked to signify that the search engine was intended to provide large quantities of information.[38] \n",
|
||||
"2 The name \"Google\" originated from a misspelling of \"googol\",[211][212] which refers to the number represented by a 1 followed by one-hundred zeros. Page and Brin write in their original paper on PageRank:[33] \"We chose our system name, Google, because it is a common spelling of googol, or 10100[,] and fits well with our goal of building very large-scale search engines.\" Having found its way increasingly into everyday language, the verb \"google\" was added to the Merriam Webster Collegiate Dictionary and the Oxford English Dictionary in 2006, meaning \"to use the Google search engine to obtain information on the Internet.\"[213][214] Google's mission statement, from the outset, was \"to organize the world's information and make it universally accessible and useful\",[215] and its unofficial \n",
|
||||
"3 ^ Meijer, Bart (January 3, 2019). \"Google shifted $23 billion to tax haven Bermuda in 2017: filing\". Reuters. Archived from the original on January 3, 2019. Retrieved January 3, 2019. Google moved 19.9 billion euros ($22.7 billion) through a Dutch shell company to Bermuda in 2017, as part of an arrangement that allows it to reduce its foreign tax bill\\n\\n^ Hamburger, Tom; Gold, Matea (April 13, 2014). \"Google, once disdainful of lobbying, now a master of Washington influence\". The Washington Post. Archived from the original on October 27, 2017. Retrieved August 22, 2017.\\n\\n^ Koller, David (January 2004). \"Origin of the name, \"Google.\"\". Stanford University. Archived from the original on June 27, 2012. Retrieved May 28, 2006. \n",
|
||||
"4 ^ Swant, Marty. \"The World's Valuable Brands\". Forbes. Archived from the original on October 18, 2020. Retrieved January 19, 2022.\\n\\n^ \"Best Global Brands\". Interbrand. Archived from the original on February 1, 2022. Retrieved March 7, 2011.\\n\\n^ a b c d \"How we started and where we are today – Google\". about.google. Archived from the original on April 22, 2020. Retrieved April 24, 2021.\\n\\n^ Brezina, Corona (2013). Sergey Brin, Larry Page, Eric Schmidt, and Google (1st ed.). New York: Rosen Publishing Group. p. 18. ISBN 978-1-4488-6911-4. LCCN 2011039480.\\n\\n^ a b c \"Our history in depth\". Google Company. Archived from the original on April 1, 2012. Retrieved July 15, 2017. \n",
|
||||
"\n",
|
||||
" Ranked Documents \n",
|
||||
"0 The name \"Google\" originated from a misspelling of \"googol\",[211][212] which refers to the number represented by a 1 followed by one-hundred zeros. Page and Brin write in their original paper on PageRank:[33] \"We chose our system name, Google, because it is a common spelling of googol, or 10100[,] and fits well with our goal of building very large-scale search engines.\" Having found its way increasingly into everyday language, the verb \"google\" was added to the Merriam Webster Collegiate Dictionary and the Oxford English Dictionary in 2006, meaning \"to use the Google search engine to obtain information on the Internet.\"[213][214] Google's mission statement, from the outset, was \"to organize the world's information and make it universally accessible and useful\",[215] and its unofficial \n",
|
||||
"1 Eventually, they changed the name to Google; the name of the search engine was a misspelling of the word googol,[21][36][37] a very large number written 10100 (1 followed by 100 zeros), picked to signify that the search engine was intended to provide large quantities of information.[38] \n",
|
||||
"2 ^ Meijer, Bart (January 3, 2019). \"Google shifted $23 billion to tax haven Bermuda in 2017: filing\". Reuters. Archived from the original on January 3, 2019. Retrieved January 3, 2019. Google moved 19.9 billion euros ($22.7 billion) through a Dutch shell company to Bermuda in 2017, as part of an arrangement that allows it to reduce its foreign tax bill\\n\\n^ Hamburger, Tom; Gold, Matea (April 13, 2014). \"Google, once disdainful of lobbying, now a master of Washington influence\". The Washington Post. Archived from the original on October 27, 2017. Retrieved August 22, 2017.\\n\\n^ Koller, David (January 2004). \"Origin of the name, \"Google.\"\". Stanford University. Archived from the original on June 27, 2012. Retrieved May 28, 2006. \n",
|
||||
"3 ^ a b Brin, Sergey; Page, Lawrence (1998). \"The anatomy of a large-scale hypertextual Web search engine\" (PDF). Computer Networks and ISDN Systems. 30 (1–7): 107–117. CiteSeerX 10.1.1.115.5930. doi:10.1016/S0169-7552(98)00110-X. ISSN 0169-7552. S2CID 7587743. Archived (PDF) from the original on September 27, 2015. Retrieved April 7, 2019.\\n\\n^ \"About: RankDex\". Archived from the original on January 20, 2012. Retrieved September 29, 2010., RankDex\\n\\n^ \"Method for node ranking in a linked database\". Google Patents. Archived from the original on October 15, 2015. Retrieved October 19, 2015.\\n\\n^ Koller, David (January 2004). \"Origin of the name \"Google\"\". Stanford University. Archived from the original on June 27, 2012. \n",
|
||||
"4 ^ Swant, Marty. \"The World's Valuable Brands\". Forbes. Archived from the original on October 18, 2020. Retrieved January 19, 2022.\\n\\n^ \"Best Global Brands\". Interbrand. Archived from the original on February 1, 2022. Retrieved March 7, 2011.\\n\\n^ a b c d \"How we started and where we are today – Google\". about.google. Archived from the original on April 22, 2020. Retrieved April 24, 2021.\\n\\n^ Brezina, Corona (2013). Sergey Brin, Larry Page, Eric Schmidt, and Google (1st ed.). New York: Rosen Publishing Group. p. 18. ISBN 978-1-4488-6911-4. LCCN 2011039480.\\n\\n^ a b c \"Our history in depth\". Google Company. Archived from the original on April 1, 2012. Retrieved July 15, 2017. "
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"# Use the basic_retriever and the retriever_with_reranker to get relevant documents\n",
|
||||
"query = \"how did the name google originate?\"\n",
|
||||
"retrieved_docs = basic_retriever.invoke(query)\n",
|
||||
"reranked_docs = retriever_with_reranker.invoke(query)\n",
|
||||
"\n",
|
||||
"# Create two lists of results for unranked and ranked docs\n",
|
||||
"unranked_docs_content = [docs.page_content for docs in retrieved_docs]\n",
|
||||
"ranked_docs_content = [docs.page_content for docs in reranked_docs]\n",
|
||||
"\n",
|
||||
"# Create a comparison DataFrame using the padded lists\n",
|
||||
"comparison_df = pd.DataFrame(\n",
|
||||
" {\n",
|
||||
" \"Unranked Documents\": unranked_docs_content,\n",
|
||||
" \"Ranked Documents\": ranked_docs_content,\n",
|
||||
" }\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"comparison_df"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "ud_cnGszb1i9"
|
||||
},
|
||||
"source": [
|
||||
"Let's inspect a couple of reranked documents. We observe that the retriever still returns the relevant Langchain type [documents](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) but as part of the metadata field, we also recieve the `relevance_score` from the Ranking API."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 225
|
||||
},
|
||||
"id": "FCDvNjPuAYVv",
|
||||
"outputId": "23454993-0251-457b-8733-bd413e1b1043"
|
||||
},
|
||||
"outputs": [
|
||||
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Document 0\n",
|
||||
"page_content='The name \"Google\" originated from a misspelling of \"googol\",[211][212] which refers to the number represented by a 1 followed by one-hundred zeros. Page and Brin write in their original paper on PageRank:[33] \"We chose our system name, Google, because it is a common spelling of googol, or 10100[,] and fits well with our goal of building very large-scale search engines.\" Having found its way increasingly into everyday language, the verb \"google\" was added to the Merriam Webster Collegiate Dictionary and the Oxford English Dictionary in 2006, meaning \"to use the Google search engine to obtain information on the Internet.\"[213][214] Google\\'s mission statement, from the outset, was \"to organize the world\\'s information and make it universally accessible and useful\",[215] and its unofficial' metadata={'id': '2', 'relevance_score': 0.9800000190734863, 'source': 'https://en.wikipedia.org/wiki/Google'}\n",
|
||||
"----------------------------------------------------------\n",
|
||||
"\n",
|
||||
"Document 1\n",
|
||||
"page_content='Eventually, they changed the name to Google; the name of the search engine was a misspelling of the word googol,[21][36][37] a very large number written 10100 (1 followed by 100 zeros), picked to signify that the search engine was intended to provide large quantities of information.[38]' metadata={'id': '1', 'relevance_score': 0.75, 'source': 'https://en.wikipedia.org/wiki/Google'}\n",
|
||||
"----------------------------------------------------------\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for i in range(2):\n",
|
||||
" print(f\"Document {i}\")\n",
|
||||
" print(reranked_docs[i])\n",
|
||||
" print(\"----------------------------------------------------------\\n\")"
|
||||
]
|
||||
},
|
||||
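{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because each reranked document carries its score in `metadata`, you can act on it directly. Below is a minimal sketch (assuming the `reranked_docs` list from the cell above) that keeps only documents above a chosen `relevance_score` threshold; the 0.5 cutoff is an arbitrary illustration, not a recommended value."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Keep only documents the Ranking API scored above an (illustrative) threshold.\n",
"# VertexAIRank populates `relevance_score` in each document's metadata.\n",
"confident_docs = [\n",
"    doc for doc in reranked_docs if doc.metadata[\"relevance_score\"] >= 0.5\n",
"]\n",
"for doc in confident_docs:\n",
"    print(doc.metadata[\"id\"], doc.metadata[\"relevance_score\"])"
]
},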
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "hELRT4bMeqcs"
|
||||
},
|
||||
"source": [
|
||||
"### Putting it all together\n",
|
||||
"\n",
|
||||
"This shows an example of a complete RAG chain with a simple prompt template on how you can perform reranking using the Vertex Ranking API.\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 17
|
||||
},
|
||||
"id": "u1cfbdZyTgeq",
|
||||
"outputId": "3395ca20-5327-4143-e769-ddefb7e1bed0"
|
||||
},
|
||||
"outputs": [
|
||||
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.docstore.document import Document\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_core.prompts import PromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
|
||||
"from langchain_google_vertexai import VertexAI\n",
|
||||
"\n",
|
||||
"llm = VertexAI(model_name=\"gemini-1.0-pro-002\")\n",
|
||||
"\n",
|
||||
"# Instantiate the VertexAIReranker with the SDK manager\n",
|
||||
"reranker = VertexAIRank(\n",
|
||||
" project_id=PROJECT_ID,\n",
|
||||
" location_id=RANKING_LOCATION_ID,\n",
|
||||
" ranking_config=\"default_ranking_config\",\n",
|
||||
" title_field=\"source\", # metadata field key from your existing documents\n",
|
||||
" top_n=5,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# value of k can be set to a higher value as well for tweaking performance\n",
|
||||
"# eg: # of docs: basic_retriever(100) -> reranker(5)\n",
|
||||
"basic_retriever = vectordb.as_retriever(search_kwargs={\"k\": 5}) # fetch top 5 documents\n",
|
||||
"\n",
|
||||
"# Create the ContextualCompressionRetriever with the VertexAIRanker as a Reranker\n",
|
||||
"retriever_with_reranker = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=reranker, base_retriever=basic_retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"template = \"\"\"\n",
|
||||
"<context>\n",
|
||||
"{context}\n",
|
||||
"</context>\n",
|
||||
"\n",
|
||||
"Question:\n",
|
||||
"{query}\n",
|
||||
"\n",
|
||||
"Don't give information outside the context or repeat your findings.\n",
|
||||
"Answer:\n",
|
||||
"\"\"\"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"\n",
|
||||
"reranker_setup_and_retrieval = RunnableParallel(\n",
|
||||
" {\"context\": retriever_with_reranker, \"query\": RunnablePassthrough()}\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = reranker_setup_and_retrieval | prompt | llm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 17
|
||||
},
|
||||
"id": "dv68uTmvT7SJ",
|
||||
"outputId": "254ebc12-fbb3-4321-9864-604383f071fe"
|
||||
},
|
||||
"outputs": [
|
||||
|
||||
],
|
||||
"source": [
|
||||
"query = \"how did the name google originate?\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 53
|
||||
},
|
||||
"id": "taZAoM_bU2_f",
|
||||
"outputId": "3a0e1c44-8760-479c-d4a9-030929cb442b"
|
||||
},
|
||||
"outputs": [
|
||||
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.google.colaboratory.intrinsic+json": {
|
||||
"type": "string"
|
||||
},
|
||||
"text/plain": [
|
||||
"'The name \"Google\" originated as a misspelling of the word \"googol,\" a mathematical term for the number 1 followed by 100 zeros. Larry Page and Sergey Brin, the founders of Google, chose the name because it reflected their goal of building a search engine that could handle massive amounts of information. \\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.invoke(query)"
|
||||
]
|
||||
}
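,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Since the chain is a standard LCEL runnable, it also exposes the usual `stream` and `batch` methods. A small sketch (the follow-up question is hypothetical, purely for illustration):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Stream the answer token by token instead of waiting for the full response.\n",
"for chunk in chain.stream(query):\n",
"    print(chunk, end=\"\", flush=True)\n",
"\n",
"# Or answer several questions in one call.\n",
"answers = chain.batch([query, \"When was the verb 'google' added to dictionaries?\"])"
]
}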
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -39,8 +39,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet google-cloud-documentai\n",
|
||||
"%pip install --upgrade --quiet google-cloud-documentai-toolbox"
|
||||
"%pip install --upgrade --quiet langchain-google-community[docai]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -71,8 +70,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders.blob_loaders import Blob\n",
|
||||
"from langchain_community.document_loaders.parsers import DocAIParser"
|
||||
"from langchain_core.document_loaders.blob_loaders import Blob\n",
|
||||
"from langchain_google_community import DocAIParser"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -31,8 +31,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_transformers import GoogleTranslateTransformer\n",
|
||||
"from langchain_core.documents import Document"
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_google_community import GoogleTranslateTransformer"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
docs/docs/integrations/document_transformers/jina_rerank.ipynb (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6ff09ab-c736-4a18-a717-563b4e29d22d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Jina Reranker"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1288789a-4c30-4fc3-90c7-dd1741a2550b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This notebook shows how to use Jina Reranker for document compression and retrieval."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a0e4d52e-3968-4f8b-9865-a886f27e5feb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -qU langchain langchain-openai langchain-community langchain-text-splitters langchainhub\n",
|
||||
"\n",
|
||||
"%pip install --upgrade --quiet faiss\n",
|
||||
"\n",
|
||||
"# OR (depending on Python version)\n",
|
||||
"\n",
|
||||
"%pip install --upgrade --quiet faiss_cpu"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d1fc07a6-8e01-4aa5-8ed4-ca2b0bfca70c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Helper function for printing docs\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def pretty_print_docs(docs):\n",
|
||||
" print(\n",
|
||||
" f\"\\n{'-' * 100}\\n\".join(\n",
|
||||
" [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n",
|
||||
" )\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d8ec4823-fdc1-4339-8a25-da598a1e2a4c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set up the base vector store retriever"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9db25269-e798-496f-8fb9-2bb280735118",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ce01a2b5-d7f4-4902-9156-9a3a86704f40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"##### Set the Jina and OpenAI API keys"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6692d5c5-c84a-4d42-8dd8-5ce90ff56d20",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"os.environ[\"JINA_API_KEY\"] = getpass.getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "981159af-fa3c-4f75-adb4-1a4de1950f2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.embeddings import JinaEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
|
||||
"\n",
|
||||
"documents = TextLoader(\n",
|
||||
" \"../../modules/state_of_the_union.txt\",\n",
|
||||
").load()\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embedding = JinaEmbeddings(model_name=\"jina-embeddings-v2-base-en\")\n",
|
||||
"retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={\"k\": 20})\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b5a514b7-027a-4dd4-9cfc-63fb4d50aa66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Doing reranking with JinaRerank"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bdd9e0ca-d728-42cb-88ad-459fb8a56b33",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's wrap our base retriever with a ContextualCompressionRetriever, using Jina Reranker as a compressor."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3000019e-cc0d-4365-91d0-72247ee4d624",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.retrievers import ContextualCompressionRetriever\n",
|
||||
"from langchain_community.document_compressors import JinaRerank\n",
|
||||
"\n",
|
||||
"compressor = JinaRerank()\n",
|
||||
"compression_retriever = ContextualCompressionRetriever(\n",
|
||||
" base_compressor=compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.get_relevant_documents(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f314f74c-48a9-4243-8d3c-2b7f820e1e40",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pretty_print_docs(compressed_docs)"
|
||||
]
|
||||
},
|
||||
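{
"cell_type": "markdown",
"metadata": {},
"source": [
"By default the compressor keeps only a few documents. As a sketch (assuming `JinaRerank` accepts the usual reranker parameters `model` and `top_n`), you can tune how many documents survive compression:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# top_n controls how many reranked documents are returned;\n",
"# model selects which Jina reranker to call.\n",
"tuned_compressor = JinaRerank(model=\"jina-reranker-v1-base-en\", top_n=5)\n",
"tuned_retriever = ContextualCompressionRetriever(\n",
"    base_compressor=tuned_compressor, base_retriever=retriever\n",
")"
]
},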
{
|
||||
"cell_type": "markdown",
|
||||
"id": "87164f04-194b-4138-8d94-f179f6f34a31",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## QA reranking with Jina Reranker"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "2b4ab60b-5a26-4cfb-9b58-3dc2d83b772b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m System Message \u001b[0m================================\n",
|
||||
"\n",
|
||||
"Answer any use questions based solely on the context below:\n",
|
||||
"\n",
|
||||
"<context>\n",
|
||||
"\u001b[33;1m\u001b[1;3m{context}\u001b[0m\n",
|
||||
"</context>\n",
|
||||
"\n",
|
||||
"=============================\u001b[1m Messages Placeholder \u001b[0m=============================\n",
|
||||
"\n",
|
||||
"\u001b[33;1m\u001b[1;3m{chat_history}\u001b[0m\n",
|
||||
"\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"\u001b[33;1m\u001b[1;3m{input}\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import hub\n",
|
||||
"from langchain.chains import create_retrieval_chain\n",
|
||||
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
|
||||
"\n",
|
||||
"retrieval_qa_chat_prompt = hub.pull(\"langchain-ai/retrieval-qa-chat\")\n",
|
||||
"retrieval_qa_chat_prompt.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "72af3eb3-b644-4b5f-bf5f-f1dc43c96882",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
|
||||
"combine_docs_chain = create_stuff_documents_chain(llm, retrieval_qa_chat_prompt)\n",
|
||||
"chain = create_retrieval_chain(compression_retriever, combine_docs_chain)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "126401a7-c545-4de0-92dc-e9bc1001a6ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain.invoke({\"input\": query})"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "poetry-venv-2",
|
||||
"language": "python",
|
||||
"name": "poetry-venv-2"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -350,7 +350,7 @@
|
||||
"retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={\"k\": 20})\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = retriever.get_relevant_documents(query)\n",
|
||||
"docs = retriever.invoke(query)\n",
|
||||
"pretty_print_docs(docs)"
|
||||
]
|
||||
},
|
||||
@@ -388,7 +388,7 @@
|
||||
" base_compressor=ov_compressor, base_retriever=retriever\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"compressed_docs = compression_retriever.get_relevant_documents(\n",
|
||||
"compressed_docs = compression_retriever.invoke(\n",
|
||||
" \"What did the president say about Ketanji Jackson Brown\"\n",
|
||||
")\n",
|
||||
"print([doc.metadata[\"id\"] for doc in compressed_docs])"
|
||||