Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-18 04:25:22 +00:00)
Compare commits
381 Commits
erick/airb ... eugene/pul
Commit SHA1s:

52a7f48522 b68867e152 6f477e3cb6 cfecbda48b d7c14cb6f9 55c624a694
d27600c6f7 4159a4723c 72ba738bf5 fc6b92bb9a 1f422318b7 8595c3ab59
a9457d269e 29c58528c7 999365186b 16e64d889a c93d4ea91c 0199b73188
03c38005cb a6cbb755a7 55db737302 b2a11ce686 37eb3a4a9e cbec43afa9
903541f439 fb9ce95184 6c9b0f96f3 441a8012b3 9246ec6b36 6e090280fd
e65dc4b95b d5415dbd68 01fc69c191 08b769d539 f1313339ac d667b1ea8f
5b6b1f9e1d 03ba1d4731 c281ec8887 d01bad5169 980658cb47 91f4c80143
dac2e0165a e5bdb26f76 a1ff21f90f 0b1e09029f 56f4c5459b e6952b04d5
aa68fd7e91 63343b4987 727d5023ce e1a6341940 82de8fd6c9 3d3b46a782
743f888580 64e1df3d3a 96dc180883 fa6397d76a 6ea3e57a63 560e2182d8
63898dbda0 db7403d667 b617085af0 b43a9d5808 2549df00cd 375ab7bf59
0cc0467267 3b52ee05d1 5402aef32e 515aab3312 49e932cd24 16ef88a87d
06190063e7 1b813fe6fe ef6d3d66d6 d4dc98a9f9 e71daa7a03 4babefcb2f
7d36ee38b7 f959fad56e d93d49bc43 a99e644913 ac57123f40 47cfbe7522
bc028294d0 11e37943ed 3b093160c4 4856a87261 c4599444ee cceaca3e4f
8a2528c34a b40c80007f 53ac1ebbbc e980c14d6a f6bcd42421 b20c2640da
a9cda536ad 0b20c098df f6c8700326 3fa711dce0 2bcd760c46 a031c183ae
d95ea3550e b58b38769d 5d220975fc aa9ccca775 68298cdc82 d9396bdec1
7d216ad1e1 455a74486b 5ac1860484 9525e392de 140f06e59a 280a914920
9dfce56b31 00614f332a 3c4529ac69 e46419c851 8609afbd10 4c2e887276
40f846e65d 69e9610f62 e5d7e455dc 7ad0a3f2a7 305d74c67a 89af30807b
f8078e41e5 30e4a35d7a 17c62e0f3a 7eb376d5fc 2c835baae4 4b3dd34544
d314acb2d5 50f93d86ec 4761c09e94 ff31cc1648 f36418a5b0 9b2f9ee952
6f544a6a25 b82644078e bb0dd8f82f 7afecec280 efcdf54edd 58c7687174
bc648f6cfc 044bc22acc 07de4abe70 24a0a4472a dc4ce82ddd bde199d128
785f8ab174 77868b1974 ae3c7f702c ca9c8c58ea c3310c5e7f 95904fe443
21c45475c5 edf9d1c905 7c26ef88a1 516cc44b3f 94e58dd827 780337488e
bd329e9aad 6fa1438334 7de1d9acfd aee5138930 21f75991d4 ec026004cb
866d6408af 366ba77459 514fe80778 bcc771e37c 9235dade90 5aa68936e0
611d5a1618 635b3372bd a1b26dd9b6 8d2c34e655 160a7077b0 7c092f479f
d96e0b2de7 7cd87d2f6a e64cf1aba4 ff94f86ce1 1a55e950aa fd4f536c77
05008c4f94 80eb510a7b ef9813dae6 0e0030f494 c244e1a50b f79d0cb9fb
eec023766e f2a7dda4bd a49ac55964 cee03630d9 0ddfe7fc9d 0a784074d1
6327be9048 553a520ab6 d647ff1a9a ebc4a64f9e 4468e5bdbe cced3eb9bc
b9c62fb905 c20aeef79a 527676a753 34d6f0557d 745d2476a2 aa785fa6ec
caf47ab666 b551d49cf5 f5b9aedc48 c922ea36cb 190887c5cd bbe164ad28
781aee0068 e3ff107e4f 9e569d85a4 191ddbc77e 508f75853c 7ce81eb6f4
5157b15446 98cd8f673b 4d7f6fa968 321db89e87 d5cf360329 b15d150d22
7253b816cc 06165efb5b 41e2f60cd2 6cdca4355d e276817e1d d4b025c812
688a5bd106 573f48e34d 69a8ef2693 751fb7de20 0ae39ab30e ae73b9d839
2999d06938 4c53e31377 5e0c58f9c2 e5e15c8d59 2b7c3c548d 71d0981f18
74b2c0aa01 9052d05442 49f3cc0f6b 2ffb2144a6 873d06c009 9c8523b529
af50f21765 4881bb669c a29e8d8594 0d1f6c417c 911ccf9aa6 471f2ed40a
75122646b5 0bec1f6877 acf1ecc081 18de77cc8c e0e688a277 cda43c5a11
19721246f5 950ab056eb fad308a764 239f0a615e 915c1f8673 578e67c017
0d888a65cb 4ff6aa5c78 5c2f7e6b2b 11195cfa42 a7fc731720 8639624d40
a7500ab0fb 96d7fe0f85 8544f748f2 cafffe8a21 2d172181e0 def329b5f2
c24c871d88 34284c25d4 93ef8ead0b 43db4cd20e a96a6e0f2c 3d15498612
8113d612bb dee256ef5a a7f63d8cb4 ddaf9de169 5fcbe9dd2a c3580d3c64
5a006f7264 3dabd3f214 07c518ad3e 9c0f84ae95 a28be31a96 246724faab
e778d60aec b48865bf94 34b31a8cc7 5d65b47e41 476d6dc596 bbb609ac9d
67c880af74 37e89ba5b1 66576948e0 4f4300723b 914af69b44 46f0cea2b9
f5117e907d bd9b5dc2f3 23e62f8f8d cf94091cd0 7a18b63dbf 5b5b37a999
9c218d0154 a8de6d1533 d1f5bc4906 aa7bce6b13 a5bcddc738 8c0b215c02
2b0cbd65ba bd9f98a20b b1f22bf76c c0bdd4d45b 7c8c4e5743 1f50274df7
ad29806255 e54a49b697 ead2a74806 a88f62ec3c ff70cc4e80 cdfb5b4ca1
3624f56ccb 48eed86931 e00c1ff2b0 a35203b164 3e29c04213 67d48ea600
bc6249c889 c8c592d3f1 3ecb903d49 93b87f2bfb fcf6213c22 61a2eba081
8accee57a9 010a234f1e b8922480ed 3b975c6ebe 4bfe888717 7f504c1f81
6caceb5473 fee6f983ef aed46cd6f2 390ef6abe3 3b1eb1f828 1e1cac50d8
d60e93b6ae ca299a8e08 8c71f92cb2 e188d4ecb0 dad949eb99 fcaa9cf2f1
1beb84b061 4a7d73b39d 6cd7607816 9745b5894d 1af2130ff7 ced5e7bae7
89d32ffbbd c09b520ce4 2b234a4d96 1b4dcf22f3 51f3902bc4 cca0167917
6041ec3dd1 12b4a4d860 687d27567d
.github/scripts/check_diff.py (vendored, 3 changes)

@@ -47,6 +47,9 @@ if __name__ == "__main__":
                 found = True
             if found:
                 dirs_to_run["extended-test"].add(dir_)
+        elif file.startswith("libs/cli"):
+            # todo: add cli makefile
+            pass
         elif file.startswith("libs/partners"):
             partner_dir = file.split("/")[2]
             if os.path.isdir(f"libs/partners/{partner_dir}") and [
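For orientation, a minimal sketch of the dispatch pattern this script follows; only `dirs_to_run`, the new `libs/cli` branch, and the `libs/partners` handling appear in the hunk above, so the surrounding scaffolding here is assumed rather than taken from the script:

```python
import os
from collections import defaultdict

# assumed shape: CI job name -> set of package directories to run it on
dirs_to_run: dict = defaultdict(set)

for file in ["libs/cli/pyproject.toml", "libs/partners/openai/pyproject.toml"]:
    if file.startswith("libs/cli"):
        pass  # todo: add cli makefile (the branch added by this diff)
    elif file.startswith("libs/partners"):
        partner_dir = file.split("/")[2]
        if os.path.isdir(f"libs/partners/{partner_dir}"):
            dirs_to_run["lint"].add(f"libs/partners/{partner_dir}")

print(dict(dirs_to_run))
```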
.github/scripts/get_min_versions.py (vendored, 22 changes)

@@ -4,7 +4,12 @@ import tomllib
 from packaging.version import parse as parse_version
 import re
 
-MIN_VERSION_LIBS = ["langchain-core", "langchain-community", "langchain", "langchain-text-splitters"]
+MIN_VERSION_LIBS = [
+    "langchain-core",
+    "langchain-community",
+    "langchain",
+    "langchain-text-splitters",
+]
 
 
 def get_min_version(version: str) -> str:
@@ -56,12 +61,13 @@ def get_min_version_from_toml(toml_path: str):
     return min_versions
 
 
-# Get the TOML file path from the command line argument
-toml_file = sys.argv[1]
+if __name__ == "__main__":
+    # Get the TOML file path from the command line argument
+    toml_file = sys.argv[1]
 
-# Call the function to get the minimum versions
-min_versions = get_min_version_from_toml(toml_file)
+    # Call the function to get the minimum versions
+    min_versions = get_min_version_from_toml(toml_file)
 
-print(
-    " ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
-)  # noqa: T201
+    print(
+        " ".join([f"{lib}=={version}" for lib, version in min_versions.items()])
+    )  # noqa: T201
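The body of `get_min_version` is not shown in this diff; a plausible sketch of what such a helper does, mapping a Poetry-style constraint to its lower bound (the regex and cases below are assumptions, not the script's actual code):

```python
import re

from packaging.version import parse as parse_version


def get_min_version(version: str) -> str:
    # "^0.1.7" or "~0.1.7" pin a lower bound of 0.1.7
    if version.startswith(("^", "~")):
        return version[1:]
    # ">=0.1.7,<0.2" style ranges: take the >= side as the minimum
    match = re.match(r">=\s*([0-9][^,]*)", version)
    if match:
        return match.group(1)
    # bare "0.1.7" is already exact
    return str(parse_version(version))


print(get_min_version("^0.1.7"))  # -> 0.1.7
```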
.github/workflows/_integration_test.yml (vendored, 2 changes)

@@ -75,6 +75,8 @@ jobs:
           ES_API_KEY: ${{ secrets.ES_API_KEY }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
+          MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
+          VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
           COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
         run: |
           make integration_tests
.github/workflows/_release.yml (vendored, 44 changes)

@@ -55,7 +55,7 @@ jobs:
       working-directory: ${{ inputs.working-directory }}

     - name: Upload build
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
       with:
         name: dist
         path: ${{ inputs.working-directory }}/dist/
@@ -157,6 +157,24 @@ jobs:
       run: make tests
       working-directory: ${{ inputs.working-directory }}

+    - name: Get minimum versions
+      working-directory: ${{ inputs.working-directory }}
+      id: min-version
+      run: |
+        poetry run pip install packaging
+        min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
+        echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
+        echo "min-versions=$min_versions"
+
+    - name: Run unit tests with minimum dependency versions
+      if: ${{ steps.min-version.outputs.min-versions != '' }}
+      env:
+        MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
+      run: |
+        poetry run pip install $MIN_VERSIONS
+        make tests
+      working-directory: ${{ inputs.working-directory }}
+
     - name: 'Authenticate to Google Cloud'
       id: 'auth'
       uses: google-github-actions/auth@v2
@@ -196,27 +214,11 @@ jobs:
         ES_API_KEY: ${{ secrets.ES_API_KEY }}
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
+        MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
+        VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
         COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
       run: make integration_tests
       working-directory: ${{ inputs.working-directory }}

-    - name: Get minimum versions
-      working-directory: ${{ inputs.working-directory }}
-      id: min-version
-      run: |
-        poetry run pip install packaging
-        min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
-        echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
-        echo "min-versions=$min_versions"
-
-    - name: Run unit tests with minimum dependency versions
-      if: ${{ steps.min-version.outputs.min-versions != '' }}
-      env:
-        MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
-      run: |
-        poetry run pip install $MIN_VERSIONS
-        make tests
-      working-directory: ${{ inputs.working-directory }}
-
   publish:
     needs:
       - build
@@ -246,7 +248,7 @@ jobs:
       working-directory: ${{ inputs.working-directory }}
       cache-key: release

-    - uses: actions/download-artifact@v3
+    - uses: actions/download-artifact@v4
       with:
         name: dist
         path: ${{ inputs.working-directory }}/dist/
@@ -285,7 +287,7 @@ jobs:
       working-directory: ${{ inputs.working-directory }}
       cache-key: release

-    - uses: actions/download-artifact@v3
+    - uses: actions/download-artifact@v4
       with:
         name: dist
         path: ${{ inputs.working-directory }}/dist/
.github/workflows/_test_release.yml (vendored, 4 changes)

@@ -48,7 +48,7 @@ jobs:
       working-directory: ${{ inputs.working-directory }}

     - name: Upload build
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
       with:
         name: test-dist
         path: ${{ inputs.working-directory }}/dist/
@@ -76,7 +76,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4

-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
        with:
          name: test-dist
          path: ${{ inputs.working-directory }}/dist/
.github/workflows/api_doc_build.yml (vendored, 78 changes; file deleted)

@@ -1,78 +0,0 @@
-name: API docs build
-
-on:
-  workflow_dispatch:
-  schedule:
-    - cron: '0 13 * * *'
-env:
-  POETRY_VERSION: "1.7.1"
-  PYTHON_VERSION: "3.10"
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: bagatur/api_docs_build
-          path: langchain
-      - uses: actions/checkout@v4
-        with:
-          repository: langchain-ai/langchain-google
-          path: langchain-google
-      - uses: actions/checkout@v4
-        with:
-          repository: langchain-ai/langchain-datastax
-          path: langchain-datastax
-
-      - name: Set Git config
-        working-directory: langchain
-        run: |
-          git config --local user.email "actions@github.com"
-          git config --local user.name "Github Actions"
-
-      - name: Merge master
-        working-directory: langchain
-        run: |
-          git fetch origin master
-          git merge origin/master -m "Merge master" --allow-unrelated-histories -X theirs
-
-      - name: Move google libs
-        run: |
-          rm -rf \
-            langchain/libs/partners/google-genai \
-            langchain/libs/partners/google-vertexai \
-            langchain/libs/partners/astradb
-          mv langchain-google/libs/genai langchain/libs/partners/google-genai
-          mv langchain-google/libs/vertexai langchain/libs/partners/google-vertexai
-          mv langchain-datastax/libs/astradb langchain/libs/partners/astradb
-
-      - name: Set up Python ${{ env.PYTHON_VERSION }} + Poetry ${{ env.POETRY_VERSION }}
-        uses: "./langchain/.github/actions/poetry_setup"
-        with:
-          python-version: ${{ env.PYTHON_VERSION }}
-          poetry-version: ${{ env.POETRY_VERSION }}
-          cache-key: api-docs
-          working-directory: langchain
-
-      - name: Install dependencies
-        working-directory: langchain
-        run: |
-          poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
-          poetry run python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext
-          # skip airbyte and ibm due to pandas dependency issue
-          poetry run python -m pip install $(ls ./libs/partners | grep -vE "airbyte|ibm" | xargs -I {} echo "./libs/partners/{}")
-          poetry run python -m pip install --exists-action=w --no-cache-dir -r docs/api_reference/requirements.txt
-
-      - name: Build docs
-        working-directory: langchain
-        run: |
-          poetry run python -m pip install --upgrade --no-cache-dir pip setuptools
-          poetry run python docs/api_reference/create_api_rst.py
-          poetry run python -m sphinx -T -E -b html -d _build/doctrees -c docs/api_reference docs/api_reference api_reference_build/html -j auto
-
-      # https://github.com/marketplace/actions/add-commit
-      - uses: EndBug/add-and-commit@v9
-        with:
-          cwd: langchain
-          message: 'Update API docs build'
.github/workflows/check-broken-links.yml (vendored, new file, 24 changes)

@@ -0,0 +1,24 @@
+name: Check Broken Links
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: '0 13 * * *'
+
+jobs:
+  check-links:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Use Node.js 18.x
+        uses: actions/setup-node@v3
+        with:
+          node-version: 18.x
+          cache: "yarn"
+          cache-dependency-path: ./docs/yarn.lock
+      - name: Install dependencies
+        run: yarn install --immutable --mode=skip-build
+        working-directory: ./docs
+      - name: Check broken links
+        run: yarn check-broken-links
+        working-directory: ./docs
.gitignore (vendored, 1 change)

@@ -116,6 +116,7 @@ celerybeat.pid
 .env
+.envrc
 .venv*
 venv*
 env/
 ENV/
 env.bak/
Makefile (2 changes)

@@ -30,7 +30,7 @@ api_docs_build:
 	cd docs/api_reference && poetry run make html

 api_docs_clean:
-	rm -f docs/api_reference/api_reference.rst
+	find ./docs/api_reference -name '*_api_reference.rst' -delete
 	cd docs/api_reference && poetry run make clean

 api_docs_linkcheck:
@@ -50,7 +50,7 @@ The LangChain libraries themselves are made up of several different packages.
 - **[`langchain-community`](libs/community)**: Third party integrations.
 - **[`langchain`](libs/langchain)**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.

-Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.
+Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.

 ## 🧱 What can you build with LangChain?

 **❓ Retrieval augmented generation**
SECURITY.md (61 changes)

@@ -1,6 +1,61 @@
 # Security Policy

-## Reporting a Vulnerability
+## Reporting OSS Vulnerabilities

-Please report security vulnerabilities by email to `security@langchain.dev`.
-This email is an alias to a subset of our maintainers, and will ensure the issue is promptly triaged and acted upon as needed.
+LangChain is partnered with [huntr by Protect AI](https://huntr.com/) to provide
+a bounty program for our open source projects.
+
+Please report security vulnerabilities associated with the LangChain
+open source projects by visiting the following link:
+
+[https://huntr.com/bounties/disclose/](https://huntr.com/bounties/disclose/?target=https%3A%2F%2Fgithub.com%2Flangchain-ai%2Flangchain&validSearch=true)
+
+Before reporting a vulnerability, please review:
+
+1) In-Scope Targets and Out-of-Scope Targets below.
+2) The [langchain-ai/langchain](https://python.langchain.com/docs/contributing/repo_structure) monorepo structure.
+3) LangChain [security guidelines](https://python.langchain.com/docs/security) to
+understand what we consider to be a security vulnerability vs. developer
+responsibility.
+
+### In-Scope Targets
+
+The following packages and repositories are eligible for bug bounties:
+
+- langchain-core
+- langchain (see exceptions)
+- langchain-community (see exceptions)
+- langgraph
+- langserve
+
+### Out of Scope Targets
+
+All out of scope targets defined by huntr as well as:
+
+- **langchain-experimental**: This repository is for experimental code and is not
+eligible for bug bounties, bug reports to it will be marked as interesting or waste of
+time and published with no bounty attached.
+- **tools**: Tools in either langchain or langchain-community are not eligible for bug
+bounties. This includes the following directories
+  - langchain/tools
+  - langchain-community/tools
+  - Please review our [security guidelines](https://python.langchain.com/docs/security)
+for more details, but generally tools interact with the real world. Developers are
+expected to understand the security implications of their code and are responsible
+for the security of their tools.
+- Code documented with security notices. This will be decided done on a case by
+case basis, but likely will not be eligible for a bounty as the code is already
+documented with guidelines for developers that should be followed for making their
+application secure.
+- Any LangSmith related repositories or APIs see below.
+
+## Reporting LangSmith Vulnerabilities
+
+Please report security vulnerabilities associated with LangSmith by email to `security@langchain.dev`.
+
+- LangSmith site: https://smith.langchain.com
+- SDK client: https://github.com/langchain-ai/langsmith-sdk
+
+### Other Security Concerns
+
+For any other security concerns, please contact us at `security@langchain.dev`.
@@ -8,6 +8,7 @@ Notebook | Description
 [Semi_Structured_RAG.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_Structured_RAG.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data, including text and tables, using unstructured for parsing, multi-vector retriever for storing, and lcel for implementing chains.
 [Semi_structured_and_multi_moda...](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data and images, using unstructured for parsing, multi-vector retriever for storage and retrieval, and lcel for implementing chains.
 [Semi_structured_multi_modal_RA...](https://github.com/langchain-ai/langchain/tree/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) | Perform retrieval-augmented generation (rag) on documents with semi-structured data and images, using various tools and methods such as unstructured for parsing, multi-vector retriever for storing, lcel for implementing chains, and open source language models like llama2, llava, and gpt4all.
+[amazon_personalize_how_to.ipynb](https://github.com/langchain-ai/langchain/blob/master/cookbook/amazon_personalize_how_to.ipynb) | Retrieving personalized recommendations from Amazon Personalize and use custom agents to build generative AI apps
 [analyze_document.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/analyze_document.ipynb) | Analyze a single long document.
 [autogpt/autogpt.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/autogpt/autogpt.ipynb) | Implement autogpt, a language model, with langchain primitives such as llms, prompttemplates, vectorstores, embeddings, and tools.
 [autogpt/marathon_times.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/autogpt/marathon_times.ipynb) | Implement autogpt for finding winning marathon times.
@@ -40,7 +40,9 @@
 "import nest_asyncio\n",
 "import pandas as pd\n",
 "from langchain.docstore.document import Document\n",
-"from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
+"from langchain_experimental.agents.agent_toolkits.pandas.base import (\n",
+"    create_pandas_dataframe_agent,\n",
+")\n",
 "from langchain_experimental.autonomous_agents import AutoGPT\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
@@ -100,7 +100,7 @@
 }
 ],
 "source": [
-"agent.run(\"whats 2 + 2\")"
+"agent.invoke(\"whats 2 + 2\")"
 ]
 },
 {
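The recurring `.run(...)` to `.invoke(...)` edits here and in the hunks below track the deprecation of the old `Chain.run` convenience method in favor of the Runnable-standard `invoke`; a minimal sketch of the migration (the chain construction is illustrative, not taken from this diff):

```python
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

chain = LLMChain(llm=OpenAI(), prompt=PromptTemplate.from_template("Say hi to {name}"))

# deprecated convenience method being phased out in these notebooks (returns a string):
chain.run("Bob")
# Runnable-standard replacement (for LLMChain, returns a dict of output keys):
chain.invoke({"name": "Bob"})
```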
@@ -52,7 +52,7 @@
 "\n",
 "bash_chain = LLMBashChain.from_llm(llm, verbose=True)\n",
 "\n",
-"bash_chain.run(text)"
+"bash_chain.invoke(text)"
 ]
 },
 {
@@ -135,7 +135,7 @@
 "\n",
 "text = \"Please write a bash script that prints 'Hello World' to the console.\"\n",
 "\n",
-"bash_chain.run(text)"
+"bash_chain.invoke(text)"
 ]
 },
 {
@@ -190,7 +190,7 @@
 "\n",
 "text = \"List the current directory then move up a level.\"\n",
 "\n",
-"bash_chain.run(text)"
+"bash_chain.invoke(text)"
 ]
 },
 {
@@ -231,7 +231,7 @@
 ],
 "source": [
 "# Run the same command again and see that the state is maintained between calls\n",
-"bash_chain.run(text)"
+"bash_chain.invoke(text)"
 ]
 }
 ],
@@ -245,7 +245,7 @@
 "\n",
 "\n",
 "def _parse(text):\n",
-"    return text.strip(\"**\")"
+"    return text.strip('\"').strip(\"**\")"
 ]
 },
 {
@@ -31,7 +31,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"!pip install langchain lark openai elasticsearch pandas"
+"!pip install langchain langchain-elasticsearch lark openai elasticsearch pandas"
 ]
 },
 {
@@ -9,7 +9,7 @@
 " \n",
 "[Together AI](https://python.langchain.com/docs/integrations/llms/together) has a broad set of OSS LLMs via inference API.\n",
 "\n",
-"See [here](https://api.together.xyz/playground). We use `\"mistralai/Mixtral-8x7B-Instruct-v0.1` for RAG on the Mixtral paper.\n",
+"See [here](https://docs.together.ai/docs/inference-models). We use `\"mistralai/Mixtral-8x7B-Instruct-v0.1` for RAG on the Mixtral paper.\n",
 "\n",
 "Download the paper:\n",
 "https://arxiv.org/pdf/2401.04088.pdf"
@@ -148,7 +148,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.16"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
@@ -52,6 +52,28 @@ services:
       retries: 60
     volumes:
       - postgres_data:/var/lib/postgresql/data
+  pgvector:
+    # postgres with the pgvector extension
+    image: ankane/pgvector
+    environment:
+      POSTGRES_DB: langchain
+      POSTGRES_USER: langchain
+      POSTGRES_PASSWORD: langchain
+    ports:
+      - "6024:5432"
+    command: |
+      postgres -c log_statement=all
+    healthcheck:
+      test:
+        [
+          "CMD-SHELL",
+          "psql postgresql://langchain:langchain@localhost/langchain --command 'SELECT 1;' || exit 1",
+        ]
+      interval: 5s
+      retries: 60
+    volumes:
+      - postgres_data_pgvector:/var/lib/postgresql/data

 volumes:
   postgres_data:
+  postgres_data_pgvector:
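Once this service is running, the container is reachable on host port 6024 with the credentials above; a minimal sketch of pointing LangChain's PGVector store at it (the embedding model and collection name are arbitrary choices, not part of the compose change):

```python
from langchain_community.vectorstores.pgvector import PGVector
from langchain_openai import OpenAIEmbeddings

# user, password, database, and port come straight from the compose service above
CONNECTION_STRING = "postgresql+psycopg2://langchain:langchain@localhost:6024/langchain"

store = PGVector.from_texts(
    ["harrison worked at kensho"],
    OpenAIEmbeddings(),
    collection_name="demo",
    connection_string=CONNECTION_STRING,
)
print(store.similarity_search("where did harrison work?", k=1))
```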
docs/.gitignore (vendored, 1 change)

@@ -1 +1,2 @@
 /.quarto/
+src/supabase.d.ts
@@ -14,19 +14,20 @@ For the most part, new integrations should be added to the Community package. Pa

 In the following sections, we'll walk through how to contribute to each of these packages from a fake company, `Parrot Link AI`.

-## Community Package
+## Community package

 The `langchain-community` package is in `libs/community` and contains most integrations.

-It is installed by users with `pip install langchain-community`, and exported members can be imported with code like
+It can be installed with `pip install langchain-community`, and exported members can be imported with code like

 ```python
-from langchain_community.chat_models import ParrotLinkLLM
-from langchain_community.llms import ChatParrotLink
+from langchain_community.chat_models import ChatParrotLink
+from langchain_community.llms import ParrotLinkLLM
 from langchain_community.vectorstores import ParrotLinkVectorStore
 ```

-The community package relies on manually-installed dependent packages, so you will see errors if you try to import a package that is not installed. In our fake example, if you tried to import `ParrotLinkLLM` without installing `parrot-link-sdk`, you will see an `ImportError` telling you to install it when trying to use it.
+The `community` package relies on manually-installed dependent packages, so you will see errors
+if you try to import a package that is not installed. In our fake example, if you tried to import `ParrotLinkLLM` without installing `parrot-link-sdk`, you will see an `ImportError` telling you to install it when trying to use it.

 Let's say we wanted to implement a chat model for Parrot Link AI. We would create a new file in `libs/community/langchain_community/chat_models/parrot_link.py` with the following code:

@@ -39,7 +40,7 @@ class ChatParrotLink(BaseChatModel):
     Example:
         .. code-block:: python

-            from langchain_parrot_link import ChatParrotLink
+            from langchain_community.chat_models import ChatParrotLink

             model = ChatParrotLink()
     """
@@ -56,9 +57,16 @@ And add documentation to:

 - `docs/docs/integrations/chat/parrot_link.ipynb`

-## Partner Packages
+## Partner package in LangChain repo

-Partner packages are in `libs/partners/*` and are installed by users with `pip install langchain-{partner}`, and exported members can be imported with code like
+Partner packages can be hosted in the `LangChain` monorepo or in an external repo.
+
+Partner package in the `LangChain` repo is placed in `libs/partners/{partner}`
+and the package source code is in `libs/partners/{partner}/langchain_{partner}`.
+
+A package is
+installed by users with `pip install langchain-{partner}`, and the package members
+can be imported with code like:

 ```python
 from langchain_{partner} import X
@@ -123,13 +131,49 @@ By default, this will include stubs for a Chat Model, an LLM, and/or a Vector St

 ### Write Unit and Integration Tests

-Some basic tests are generated in the tests/ directory. You should add more tests to cover your package's functionality.
+Some basic tests are presented in the `tests/` directory. You should add more tests to cover your package's functionality.

 For information on running and implementing tests, see the [Testing guide](./testing).

 ### Write documentation

-Documentation is generated from Jupyter notebooks in the `docs/` directory. You should move the generated notebooks to the relevant `docs/docs/integrations` directory in the monorepo root.
+Documentation is generated from Jupyter notebooks in the `docs/` directory. You should place the notebooks with examples
+to the relevant `docs/docs/integrations` directory in the monorepo root.
+
+### (If Necessary) Deprecate community integration
+
+Note: this is only necessary if you're migrating an existing community integration into
+a partner package. If the component you're integrating is net-new to LangChain (i.e.
+not already in the `community` package), you can skip this step.
+
+Let's pretend we migrated our `ChatParrotLink` chat model from the community package to
+the partner package. We would need to deprecate the old model in the community package.
+
+We would do that by adding a `@deprecated` decorator to the old model as follows, in
+`libs/community/langchain_community/chat_models/parrot_link.py`.
+
+Before our change, our chat model might look like this:
+
+```python
+class ChatParrotLink(BaseChatModel):
+    ...
+```
+
+After our change, it would look like this:
+
+```python
+from langchain_core._api.deprecation import deprecated
+
+@deprecated(
+    since="0.0.<next community version>",
+    removal="0.2.0",
+    alternative_import="langchain_parrot_link.ChatParrotLink"
+)
+class ChatParrotLink(BaseChatModel):
+    ...
+```
+
+You should do this for *each* component that you're migrating to the partner package.

 ### Additional steps
@@ -143,3 +187,15 @@ Maintainer steps (Contributors should **not** do these):
 - [ ] set up pypi and test pypi projects
 - [ ] add credential secrets to Github Actions
 - [ ] add package to conda-forge
+
+## Partner package in external repo
+
+If you are creating a partner package in an external repo, you should follow the same steps as above,
+but you will need to set up your own CI/CD and package management.
+
+Name your package as `langchain-{partner}-{integration}`.
+
+Still, you have to create the `libs/partners/{partner}-{integration}` folder in the `LangChain` monorepo
+and add a `README.md` file with a link to the external repo.
+See this [example](https://github.com/langchain-ai/langchain/tree/master/libs/partners/google-genai).
+This allows keeping track of all the partner packages in the `LangChain` documentation.
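As a usage note on the deprecation pattern added above: once the decorator lands, the community import keeps resolving but warns on use; a sketch of the resulting behavior (Parrot Link is the doc's own fictional example, so this is illustrative rather than runnable):

```python
# old path: still importable, but constructing the model emits a
# LangChainDeprecationWarning pointing at the declared alternative_import
from langchain_community.chat_models import ChatParrotLink  # fictional example

model = ChatParrotLink()  # warns: use langchain_parrot_link.ChatParrotLink instead

# preferred import after migration (hypothetical partner package):
# from langchain_parrot_link import ChatParrotLink
```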
@@ -20,9 +20,11 @@
 ]
 },
 {
-"cell_type": "raw",
+"cell_type": "code",
 "id": "0f316b5c",
+"execution_count": null,
 "metadata": {},
+"outputs": [],
 "source": [
 "%pip install --upgrade --quiet langchain langchain-openai"
 ]
@@ -20,9 +20,11 @@
 ]
 },
 {
-"cell_type": "raw",
+"cell_type": "code",
 "id": "b3121aa8",
+"execution_count": null,
 "metadata": {},
+"outputs": [],
 "source": [
 "%pip install --upgrade --quiet langchain langchain-openai"
 ]
@@ -31,9 +31,11 @@
 ]
 },
 {
-"cell_type": "raw",
+"cell_type": "code",
+"execution_count": null,
 "id": "278b0027",
 "metadata": {},
+"outputs": [],
 "source": [
 "%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
 ]
@@ -217,7 +219,7 @@
 }
 ],
 "source": [
-"from langchain_openai.llms import OpenAI\n",
+"from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
 "llm.invoke(prompt_value)"
@@ -336,8 +338,7 @@
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
-"from langchain_openai.chat_models import ChatOpenAI\n",
-"from langchain_openai.embeddings import OpenAIEmbeddings\n",
+"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
 "\n",
 "vectorstore = DocArrayInMemorySearch.from_texts(\n",
 "    [\"harrison worked at kensho\", \"bears like to eat honey\"],\n",

@@ -557,7 +557,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"openai_poem = chain.with_config(configurable={\"llm\": \"openai\"})"
+"openai_joke = chain.with_config(configurable={\"llm\": \"openai\"})"
 ]
 },
 {
@@ -578,7 +578,7 @@
 }
 ],
 "source": [
-"openai_poem.invoke({\"topic\": \"bears\"})"
+"openai_joke.invoke({\"topic\": \"bears\"})"
 ]
 },
 {
@@ -55,7 +55,7 @@
 "id": "9eb73e8b",
 "metadata": {},
 "source": [
-"We will show examples of streaming using the chat model from [Anthropic](https://python.langchain.com/docs/integrations/platforms/anthropic). To use the model, you will need to install the `langchain-anthropic` package. You can do this with the following command:"
+"We will show examples of streaming using the chat model from [Anthropic](/docs/integrations/platforms/anthropic). To use the model, you will need to install the `langchain-anthropic` package. You can do this with the following command:"
 ]
 },
 {
@@ -658,7 +658,7 @@
 "\n",
 "This is a **beta API**, and we're almost certainly going to make some changes to it.\n",
 "\n",
-"This version parameter will allow us to mimimize such breaking changes to your code. \n",
+"This version parameter will allow us to minimize such breaking changes to your code. \n",
 "\n",
 "In short, we are annoying you now, so we don't have to annoy you later.\n",
 ":::"
@@ -36,9 +36,11 @@
 ]
 },
 {
-"cell_type": "raw",
+"cell_type": "code",
+"execution_count": null,
 "id": "b99b47ec",
 "metadata": {},
+"outputs": [],
 "source": [
 "%pip install --upgrade --quiet langchain-core langchain-openai langchain-anthropic"
 ]
@@ -317,7 +319,7 @@
 "#### LCEL\n",
 "\n",
 "```python\n",
-"chain.ainvoke(\"ice cream\")\n",
+"await chain.ainvoke(\"ice cream\")\n",
 "```"
 ]
 },
@@ -737,7 +739,7 @@
 "        return await ainvoke_chain(topic)\n",
 "    except Exception:\n",
 "        # Note: we haven't actually implemented this.\n",
-"        return ainvoke_anthropic_chain(topic)\n",
+"        return await ainvoke_anthropic_chain(topic)\n",
 "\n",
 "async def batch_chain_with_fallback(topics: List[str]) -> str:\n",
 "    try:\n",
@@ -963,7 +965,7 @@
 "    try:\n",
 "        return await ainvoke_chain(topic)\n",
 "    except Exception:\n",
-"        return ainvoke_anthropic_chain(topic)\n",
+"        return await ainvoke_anthropic_chain(topic)\n",
 "\n",
 "async def batch_chain_with_fallback(topics: List[str]) -> str:\n",
 "    try:\n",
@@ -1068,7 +1070,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.1"
+"version": "3.11.6"
 }
 },
 "nbformat": 4,
@@ -286,7 +286,7 @@ embeddings = OllamaEmbeddings()
 </TabItem>
 <TabItem value="cohere" label="Cohere (API)" default>

-Make sure you have the `cohere` package installed an the appropriate environment variables set (these are the same as needed for the LLM).
+Make sure you have the `cohere` package installed and the appropriate environment variables set (these are the same as needed for the LLM).

 ```python
 from langchain_community.embeddings import CohereEmbeddings
@@ -563,7 +563,6 @@ from langchain_community.vectorstores import FAISS
 from langchain_text_splitters import RecursiveCharacterTextSplitter
-from langchain.tools.retriever import create_retriever_tool
 from langchain_community.tools.tavily_search import TavilySearchResults
 from langchain_openai import ChatOpenAI
 from langchain import hub
 from langchain.agents import create_openai_functions_agent
 from langchain.agents import AgentExecutor
@@ -23,7 +23,7 @@ We also are working to share guides and cookbooks that demonstrate how to use th

 ## LangSmith Evaluation

-LangSmith provides an integrated evaluation and tracing framework that allows you to check for regressions, compare systems, and easily identify and fix any sources of errors and performance issues. Check out the docs on [LangSmith Evaluation](https://docs.smith.langchain.com/category/testing--evaluation) and additional [cookbooks](https://docs.smith.langchain.com/category/langsmith-cookbook) for more detailed information on evaluating your applications.
+LangSmith provides an integrated evaluation and tracing framework that allows you to check for regressions, compare systems, and easily identify and fix any sources of errors and performance issues. Check out the docs on [LangSmith Evaluation](https://docs.smith.langchain.com/evaluation) and additional [cookbooks](https://docs.smith.langchain.com/cookbook) for more detailed information on evaluating your applications.

 ## LangChain benchmarks
@@ -7,7 +7,7 @@
 "source": [
 "# JSON Evaluators\n",
 "\n",
-"Evaluating [extraction](https://python.langchain.com/docs/use_cases/extraction) and function calling applications often comes down to validation that the LLM's string output can be parsed correctly and how it compares to a reference object. The following `JSON` validators provide functionality to check your model's output consistently.\n",
+"Evaluating [extraction](/docs/use_cases/extraction) and function calling applications often comes down to validation that the LLM's string output can be parsed correctly and how it compares to a reference object. The following `JSON` validators provide functionality to check your model's output consistently.\n",
 "\n",
 "## JsonValidityEvaluator\n",
 "\n",
docs/docs/guides/extending_langchain.mdx (new file, 13 changes)

@@ -0,0 +1,13 @@
+---
+hide_table_of_contents: true
+---
+
+# Extending LangChain
+
+Extending LangChain's base abstractions, whether you're planning to contribute back to the open-source repo or build a bespoke internal integration, is encouraged.
+
+Check out these guides for building your own custom classes for the following modules:
+
+- [Chat models](/docs/modules/model_io/chat/custom_chat_model) for interfacing with chat-tuned language models.
+- [LLMs](/docs/modules/model_io/llms/custom_llm) for interfacing with text language models.
+- [Output parsers](/docs/modules/model_io/output_parsers/custom) for handling language model outputs.
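To make the custom-LLM path the new guide points at concrete, a minimal sketch of subclassing the `LLM` base class (the `EchoLLM` name and echo behavior are invented for illustration):

```python
from typing import Any, List, Optional

from langchain_core.language_models.llms import LLM


class EchoLLM(LLM):
    """Toy custom LLM that returns the prompt unchanged."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> str:
        # a real integration would call a model API here
        return prompt


print(EchoLLM().invoke("The first man on the moon was ..."))
```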
@@ -98,7 +98,7 @@
 "from langchain_community.llms import Ollama\n",
 "\n",
 "llm = Ollama(model=\"llama2\")\n",
-"llm(\"The first man on the moon was ...\")"
+"llm.invoke(\"The first man on the moon was ...\")"
 ]
 },
 {
@@ -140,7 +140,7 @@
 "llm = Ollama(\n",
 "    model=\"llama2\", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])\n",
 ")\n",
-"llm(\"The first man on the moon was ...\")"
+"llm.invoke(\"The first man on the moon was ...\")"
 ]
 },
 {
@@ -226,7 +226,7 @@
 "from langchain_community.llms import Ollama\n",
 "\n",
 "llm = Ollama(model=\"llama2:13b\")\n",
-"llm(\"The first man on the moon was ... think step by step\")"
+"llm.invoke(\"The first man on the moon was ... think step by step\")"
 ]
 },
 {
@@ -369,7 +369,7 @@
 }
 ],
 "source": [
-"llm(\"The first man on the moon was ... Let's think step by step\")"
+"llm.invoke(\"The first man on the moon was ... Let's think step by step\")"
 ]
 },
 {
@@ -426,7 +426,7 @@
 }
 ],
 "source": [
-"llm(\"The first man on the moon was ... Let's think step by step\")"
+"llm.invoke(\"The first man on the moon was ... Let's think step by step\")"
 ]
 },
 {
@@ -24,7 +24,7 @@
 "<img src=\"/img/qa_privacy_protection.png\" width=\"900\"/>\n",
 "\n",
 "\n",
-"In the following notebook, we will not go into the details of how the anonymizer works. If you are interested, please visit [this part of the documentation](https://python.langchain.com/docs/guides/privacy/presidio_data_anonymization/).\n",
+"In the following notebook, we will not go into the details of how the anonymizer works. If you are interested, please visit [this part of the documentation](/docs/guides/privacy/presidio_data_anonymization/).\n",
 "\n",
 "## Quickstart\n",
 "\n",
@@ -16,13 +16,13 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "id": "6017f26a",
 "metadata": {},
 "outputs": [],
 "source": [
 "import openai\n",
-"from langchain.adapters import openai as lc_openai"
+"from langchain_community.adapters import openai as lc_openai"
 ]
 },
 {
@@ -277,7 +277,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.5"
+"version": "3.10.12"
 }
 },
 "nbformat": 4,
@@ -22,7 +22,7 @@
 "outputs": [],
 "source": [
 "import openai\n",
-"from langchain.adapters import openai as lc_openai"
+"from langchain_community.adapters import openai as lc_openai"
 ]
 },
 {
@@ -310,7 +310,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.5"
+"version": "3.10.12"
 }
 },
 "nbformat": 4,
@@ -129,7 +129,7 @@
 "Who was famed for their Christian spirit?\n",
 "Who assimilted the Roman language?\n",
 "Who ruled the country of Normandy?\n",
-"What principality did William the conquerer found?\n",
+"What principality did William the conqueror found?\n",
 "What is the original meaning of the word Norman?\n",
 "When was the Latin version of the word Norman first recorded?\n",
 "What name comes from the English words Normans/Normanz?\"\"\"\n",
@@ -9,7 +9,7 @@
 "\n",
 ">[PromptLayer](https://docs.promptlayer.com/introduction) is a platform for prompt engineering. It also helps with the LLM observability to visualize requests, version prompts, and track usage.\n",
 ">\n",
-">While `PromptLayer` does have LLMs that integrate directly with LangChain (e.g. [`PromptLayerOpenAI`](https://python.langchain.com/docs/integrations/llms/promptlayer_openai)), using a callback is the recommended way to integrate `PromptLayer` with LangChain.\n",
+">While `PromptLayer` does have LLMs that integrate directly with LangChain (e.g. [`PromptLayerOpenAI`](/docs/integrations/llms/promptlayer_openai)), using a callback is the recommended way to integrate `PromptLayer` with LangChain.\n",
 "\n",
 "In this guide, we will go over how to setup the `PromptLayerCallbackHandler`. \n",
 "\n",
@@ -124,7 +124,7 @@
 "tags": []
 },
 "source": [
-"Here are two examples of how to use the `TrubricsCallbackHandler` with Langchain [LLMs](https://python.langchain.com/docs/modules/model_io/llms/) or [Chat Models](https://python.langchain.com/docs/modules/model_io/chat/). We will use OpenAI models, so set your `OPENAI_API_KEY` key here:"
+"Here are two examples of how to use the `TrubricsCallbackHandler` with Langchain [LLMs](/docs/modules/model_io/llms/) or [Chat Models](/docs/modules/model_io/chat/). We will use OpenAI models, so set your `OPENAI_API_KEY` key here:"
 ]
 },
 {
@@ -41,7 +41,7 @@
 "source": [
 "## Environment Setup\n",
 "\n",
-"We'll need to get a [Anthropic](https://console.anthropic.com/settings/keys) and set the `ANTHROPIC_API_KEY` environment variable:"
+"We'll need to get an [Anthropic](https://console.anthropic.com/settings/keys) API key and set the `ANTHROPIC_API_KEY` environment variable:"
 ]
 },
 {
@@ -32,7 +32,7 @@
 "The integration lives in the `langchain-community` package. We also need to install the `cohere` package itself. We can install these with:\n",
 "\n",
 "```bash\n",
-"pip install -U langchain-community cohere\n",
+"pip install -U langchain-community langchain-cohere\n",
 "```\n",
 "\n",
 "We'll also need to get a [Cohere API key](https://cohere.com/) and set the `COHERE_API_KEY` environment variable:"
@@ -40,18 +40,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 1,
 "id": "2108b517-1e8d-473d-92fa-4f930e8072a7",
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"········\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "import getpass\n",
 "import os\n",
@@ -90,20 +82,20 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": 3,
 "id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
 "metadata": {
 "tags": []
 },
 "outputs": [],
 "source": [
-"from langchain_community.chat_models import ChatCohere\n",
+"from langchain_cohere import ChatCohere\n",
 "from langchain_core.messages import HumanMessage"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 4,
 "id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
 "metadata": {
 "tags": []
@@ -115,7 +107,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": 5,
 "id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
 "metadata": {
 "tags": []
@@ -124,22 +116,22 @@
 {
 "data": {
 "text/plain": [
-"AIMessage(content=\"Who's there?\")"
+"AIMessage(content=\"4! That's one, two, three, four. Keep adding and we'll reach new heights!\", response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'token_count': {'prompt_tokens': 73, 'response_tokens': 21, 'total_tokens': 94, 'billed_tokens': 25}})"
 ]
 },
-"execution_count": 3,
+"execution_count": 5,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
-"messages = [HumanMessage(content=\"knock knock\")]\n",
+"messages = [HumanMessage(content=\"1\"), HumanMessage(content=\"2 3\")]\n",
 "chat.invoke(messages)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": 6,
 "id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
 "metadata": {
 "tags": []
@@ -148,10 +140,10 @@
 {
 "data": {
 "text/plain": [
-"AIMessage(content=\"Who's there?\")"
+"AIMessage(content='4! According to the rules of addition, 1 + 2 equals 3, and 3 + 3 equals 6.', response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'token_count': {'prompt_tokens': 73, 'response_tokens': 28, 'total_tokens': 101, 'billed_tokens': 32}})"
 ]
 },
-"execution_count": 4,
+"execution_count": 6,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -162,7 +154,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 7,
 "id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
 "metadata": {
 "tags": []
@@ -172,7 +164,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Who's there?"
+"4! It's a pleasure to be of service in this mathematical game."
 ]
 }
 ],
@@ -183,17 +175,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 8,
 "id": "064288e4-f184-4496-9427-bcf148fa055e",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"[AIMessage(content=\"Who's there?\")]"
+"[AIMessage(content='4! According to the rules of addition, 1 + 2 equals 3, and 3 + 3 equals 6.', response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'token_count': {'prompt_tokens': 73, 'response_tokens': 28, 'total_tokens': 101, 'billed_tokens': 32}})]"
 ]
 },
-"execution_count": 6,
+"execution_count": 8,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -214,7 +206,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 9,
 "id": "0851b103",
 "metadata": {},
 "outputs": [],
@@ -227,17 +219,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 10,
 "id": "ae950c0f-1691-47f1-b609-273033cae707",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"AIMessage(content=\"Why did the bear go to the chiropractor?\\n\\nBecause she was feeling a bit grizzly!\\n\\nHope you found that joke about bears to be a little bit amusing! If you'd like to hear another one, just let me know. In the meantime, if you have any other questions or need assistance with a different topic, feel free to let me know. \\n\\nJust remember, even if you have a sore back like the bear, it's always best to consult a licensed professional for injuries or pain you may be experiencing. \\n\\nWould you like me to tell you another joke?\")"
+"AIMessage(content='What do you call a bear with no teeth? A gummy bear!', response_metadata={'documents': None, 'citations': None, 'search_results': None, 'search_queries': None, 'token_count': {'prompt_tokens': 72, 'response_tokens': 14, 'total_tokens': 86, 'billed_tokens': 20}})"
 ]
 },
-"execution_count": 8,
+"execution_count": 10,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -263,7 +255,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.1"
+"version": "3.11.7"
 }
 },
 "nbformat": 4,
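The hunks above migrate the Cohere chat integration from `langchain_community` to the dedicated `langchain-cohere` partner package; a minimal usage sketch of the new import path (leaving the model at its default is an assumption):

```python
# pip install -U langchain-cohere
from langchain_cohere import ChatCohere
from langchain_core.messages import HumanMessage

chat = ChatCohere()  # reads COHERE_API_KEY from the environment
print(chat.invoke([HumanMessage(content="knock knock")]).content)
```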
155
docs/docs/integrations/chat/dappier.ipynb
Normal file
155
docs/docs/integrations/chat/dappier.ipynb
Normal file
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Dappier AI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Dappier: Powering AI with Dynamic, Real-Time Data Models**\n",
|
||||
"\n",
|
||||
"Dappier offers a cutting-edge platform that grants developers immediate access to a wide array of real-time data models spanning news, entertainment, finance, market data, weather, and beyond. With our pre-trained data models, you can supercharge your AI applications, ensuring they deliver precise, up-to-date responses and minimize inaccuracies.\n",
|
||||
"\n",
|
||||
"Dappier data models help you build next-gen LLM apps with trusted, up-to-date content from the world's leading brands. Unleash your creativity and enhance any GPT App or AI workflow with actionable, proprietary, data through a simple API. Augment your AI with proprietary data from trusted sources is the best way to ensure factual, up-to-date, responses with fewer hallucinations no matter the question.\n",
|
||||
"\n",
|
||||
"For Developers, By Developers\n",
|
||||
"Designed with developers in mind, Dappier simplifies the journey from data integration to monetization, providing clear, straightforward paths to deploy and earn from your AI models. Experience the future of monetization infrastructure for the new internet at **https://dappier.com/**."
|
||||
]
|
||||
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This example goes over how to use LangChain to interact with Dappier AI models.\n",
"\n",
"-----------------------------------------------------------------------------------"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To use one of our Dappier AI Data Models, you will need an API key. Please visit the Dappier Platform (https://platform.dappier.com/) to log in and create an API key in your profile.\n",
"\n",
"\n",
"You can find more details in the API reference: https://docs.dappier.com/introduction"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To work with our Dappier Chat Model, you can pass the key directly through the parameter named `dappier_api_key` when initializing the class,\n",
"or set it as an environment variable.\n",
"\n",
"```bash\n",
"export DAPPIER_API_KEY=\"...\"\n",
"```\n"
]
},
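For completeness, here is a minimal sketch of the environment-variable route in Python; the `getpass` prompt is illustrative, and it assumes `ChatDappierAI` reads `DAPPIER_API_KEY` from the environment, as the note above states:

```python
import getpass
import os

# Prompt for the key only if DAPPIER_API_KEY is not already set;
# the chat model can then be created without passing the key explicitly.
if "DAPPIER_API_KEY" not in os.environ:
    os.environ["DAPPIER_API_KEY"] = getpass.getpass("Dappier API Key: ")
```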
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models.dappier import ChatDappierAI\n",
"from langchain_core.messages import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"chat = ChatDappierAI(\n",
"    dappier_endpoint=\"https://api.dappier.com/app/datamodelconversation\",\n",
"    dappier_model=\"dm_01hpsxyfm2fwdt2zet9cg6fdxt\",\n",
"    dappier_api_key=\"...\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Hey there! The Kansas City Chiefs won Super Bowl LVIII in 2024. They beat the San Francisco 49ers in overtime with a final score of 25-22. It was quite the game! 🏈')"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [HumanMessage(content=\"Who won the super bowl in 2024?\")]\n",
"chat.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='The Kansas City Chiefs won Super Bowl LVIII in 2024! 🏈')"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await chat.ainvoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
286
docs/docs/integrations/chat/friendli.ipynb
Normal file
@@ -0,0 +1,286 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Friendli\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# ChatFriendli\n",
"\n",
"> [Friendli](https://friendli.ai/) enhances AI application performance and optimizes cost savings with scalable, efficient deployment options, tailored for high-demand AI workloads.\n",
"\n",
"This tutorial guides you through integrating `ChatFriendli` for chat applications using LangChain. `ChatFriendli` offers a flexible approach to generating conversational AI responses, supporting both synchronous and asynchronous calls."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"Ensure the `langchain_community` and `friendli-client` packages are installed.\n",
"\n",
"```sh\n",
"pip install -U langchain-community friendli-client\n",
"```\n",
"\n",
"Sign in to [Friendli Suite](https://suite.friendli.ai/) to create a Personal Access Token, and set it as the `FRIENDLI_TOKEN` environment variable."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendli Personal Access Token: \")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can initialize a Friendli chat model by selecting the model you want to use. The default model is `mixtral-8x7b-instruct-v0-1`. You can check the available models at [docs.friendli.ai](https://docs.periflow.ai/guides/serverless_endpoints/pricing#text-generation-models)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models.friendli import ChatFriendli\n",
"\n",
"chat = ChatFriendli(model=\"llama-2-13b-chat\", max_tokens=100, temperature=0)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage\n",
"\n",
"`ChatFriendli` supports all methods of [`ChatModel`](/docs/modules/model_io/chat/) including async APIs."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also use the functionality of `invoke`, `batch`, `generate`, and `stream`."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.messages.human import HumanMessage\n",
"from langchain_core.messages.system import SystemMessage\n",
"\n",
"system_message = SystemMessage(content=\"Answer questions as short as you can.\")\n",
"human_message = HumanMessage(content=\"Tell me a joke.\")\n",
"messages = [system_message, human_message]\n",
"\n",
"chat.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"),\n",
" AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat.batch([messages, messages])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"LLMResult(generations=[[ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))], [ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))]], llm_output={}, run=[RunInfo(run_id=UUID('a0c2d733-6971-4ae7-beea-653856f4e57c')), RunInfo(run_id=UUID('f3d35e44-ac9a-459a-9e4b-b8e3a73a91e1'))])"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat.generate([messages, messages])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Knock, knock!\n",
"Who's there?\n",
"Cows go.\n",
"Cows go who?\n",
"MOO!"
]
}
],
"source": [
"for chunk in chat.stream(messages):\n",
"    print(chunk.content, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can also use all functionality of async APIs: `ainvoke`, `abatch`, `agenerate`, and `astream`."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await chat.ainvoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"),\n",
" AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await chat.abatch([messages, messages])"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"LLMResult(generations=[[ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))], [ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))]], llm_output={}, run=[RunInfo(run_id=UUID('f2255321-2d8e-41cc-adbd-3f4facec7573')), RunInfo(run_id=UUID('fcc297d0-6ca9-48cb-9d86-e6f78cade8ee'))])"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await chat.agenerate([messages, messages])"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Knock, knock!\n",
"Who's there?\n",
"Cows go.\n",
"Cows go who?\n",
"MOO!"
]
}
],
"source": [
"async for chunk in chat.astream(messages):\n",
"    print(chunk.content, end=\"\", flush=True)"
]
}
],
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -13,9 +13,12 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": null,
"metadata": {
"collapsed": true
"collapsed": true,
"pycharm": {
"is_executing": true
}
},
"outputs": [],
"source": [
@@ -28,13 +31,14 @@
"collapsed": false
},
"source": [
"To get GigaChat credentials you need to [create an account](https://developers.sber.ru/studio/login) and [get access to the API](https://developers.sber.ru/docs/ru/gigachat/api/integration)\n",
"To get GigaChat credentials you need to [create an account](https://developers.sber.ru/studio/login) and [get access to the API](https://developers.sber.ru/docs/ru/gigachat/individuals-quickstart)\n",
"\n",
"## Example"
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 2,
"metadata": {
"collapsed": false
},
@@ -48,7 +52,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 3,
"metadata": {
"collapsed": false
},
@@ -56,12 +60,12 @@
"source": [
"from langchain_community.chat_models import GigaChat\n",
"\n",
"chat = GigaChat(verify_ssl_certs=False)"
"chat = GigaChat(verify_ssl_certs=False, scope=\"GIGACHAT_API_PERS\")"
]
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 8,
"metadata": {
"collapsed": false
},
@@ -70,7 +74,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"What do you get when you cross a goat and a skunk? A smelly goat!\n"
"The capital of Russia is Moscow.\n"
]
}
],
@@ -81,10 +85,10 @@
"    SystemMessage(\n",
"        content=\"You are a helpful AI that shares everything you know. Talk in English.\"\n",
"    ),\n",
"    HumanMessage(content=\"Tell me a joke\"),\n",
"    HumanMessage(content=\"What is the capital of Russia?\"),\n",
"]\n",
"\n",
"print(chat(messages).content)"
"print(chat.invoke(messages).content)"
]
}
],
@@ -10,7 +10,7 @@
"\n",
"In particular, we will:\n",
"1. Utilize the [HuggingFaceTextGenInference](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/huggingface_text_gen_inference.py), [HuggingFaceEndpoint](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/huggingface_endpoint.py), or [HuggingFaceHub](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/huggingface_hub.py) integrations to instantiate an `LLM`.\n",
"2. Utilize the `ChatHuggingFace` class to enable any of these LLMs to interface with LangChain's [Chat Messages](https://python.langchain.com/docs/modules/model_io/chat/#messages) abstraction.\n",
"2. Utilize the `ChatHuggingFace` class to enable any of these LLMs to interface with LangChain's [Chat Messages](/docs/modules/model_io/chat/#messages) abstraction.\n",
"3. Demonstrate how to use an open-source LLM to power a `ChatAgent` pipeline\n",
"\n",
"\n",
@@ -280,7 +280,7 @@
"source": [
"## 3. Take it for a spin as an agent!\n",
"\n",
"Here we'll test out `Zephyr-7B-beta` as a zero-shot `ReAct` Agent. The example below is taken from [here](https://python.langchain.com/docs/modules/agents/agent_types/react#using-chat-models).\n",
"Here we'll test out `Zephyr-7B-beta` as a zero-shot `ReAct` Agent. The example below is taken from [here](/docs/modules/agents/agent_types/react#using-chat-models).\n",
"\n",
"> Note: To run this section, you'll need to have a [SerpAPI Token](https://serpapi.com/) saved as an environment variable: `SERPAPI_API_KEY`"
]
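As a minimal sketch of steps 1 and 2 above (the `repo_id`, the Zephyr model, and the message contents are illustrative; this assumes a `HuggingFaceEndpoint`-backed model):

```python
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain_community.llms import HuggingFaceEndpoint
from langchain_core.messages import HumanMessage, SystemMessage

# Instantiate an LLM backed by a hosted Hugging Face endpoint, then wrap it
# so it speaks LangChain's chat-message abstraction.
llm = HuggingFaceEndpoint(repo_id="HuggingFaceH4/zephyr-7b-beta")
chat = ChatHuggingFace(llm=llm)

messages = [
    SystemMessage(content="Answer as briefly as you can."),
    HumanMessage(content="What is the capital of France?"),
]
print(chat.invoke(messages).content)
```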
@@ -17,9 +17,9 @@
"source": [
"# Llama2Chat\n",
"\n",
"This notebook shows how to augment Llama-2 `LLM`s with the `Llama2Chat` wrapper to support the [Llama-2 chat prompt format](https://huggingface.co/blog/llama2#how-to-prompt-llama-2). Several `LLM` implementations in LangChain can be used as an interface to Llama-2 chat models. These include [HuggingFaceTextGenInference](https://python.langchain.com/docs/integrations/llms/huggingface_textgen_inference), [LlamaCpp](https://python.langchain.com/docs/use_cases/question_answering/how_to/local_retrieval_qa), [GPT4All](https://python.langchain.com/docs/integrations/llms/gpt4all), ..., to mention a few examples. \n",
"This notebook shows how to augment Llama-2 `LLM`s with the `Llama2Chat` wrapper to support the [Llama-2 chat prompt format](https://huggingface.co/blog/llama2#how-to-prompt-llama-2). Several `LLM` implementations in LangChain can be used as an interface to Llama-2 chat models. These include [ChatHuggingFace](/docs/integrations/chat/huggingface), [LlamaCpp](/docs/use_cases/question_answering/local_retrieval_qa), [GPT4All](/docs/integrations/llms/gpt4all), ..., to mention a few examples. \n",
"\n",
"`Llama2Chat` is a generic wrapper that implements `BaseChatModel` and can therefore be used in applications as [chat model](https://python.langchain.com/docs/modules/model_io/models/chat/). `Llama2Chat` converts a list of [chat messages](https://python.langchain.com/docs/modules/model_io/models/chat/#messages) into the [required chat prompt format](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) and forwards the formatted prompt as `str` to the wrapped `LLM`."
"`Llama2Chat` is a generic wrapper that implements `BaseChatModel` and can therefore be used in applications as [chat model](/docs/modules/model_io/chat/). `Llama2Chat` converts a list of Messages into the [required chat prompt format](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) and forwards the formatted prompt as `str` to the wrapped `LLM`."
]
},
{
@@ -77,7 +77,7 @@
"id": "2ff99380",
"metadata": {},
"source": [
"A [HuggingFaceTextGenInference](https://python.langchain.com/docs/integrations/llms/huggingface_textgen_inference) LLM encapsulates access to a [text-generation-inference](https://github.com/huggingface/text-generation-inference) server. In the following example, the inference server serves a [meta-llama/Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) model. It can be started locally with:\n",
"A HuggingFaceTextGenInference LLM encapsulates access to a [text-generation-inference](https://github.com/huggingface/text-generation-inference) server. In the following example, the inference server serves a [meta-llama/Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) model. It can be started locally with:\n",
"\n",
"```bash\n",
"docker run \\\n",
@@ -220,380 +220,17 @@
"id": "52c1a0b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For using a Llama-2 chat model with a [LlamaCPP](https://python.langchain.com/docs/integrations/llms/llamacpp) `LMM`, install the `llama-cpp-python` library using [these installation instructions](https://python.langchain.com/docs/integrations/llms/llamacpp#installation). The following example uses a quantized [llama-2-7b-chat.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_0.gguf) model stored locally at `~/Models/llama-2-7b-chat.Q4_0.gguf`. \n",
|
||||
"For using a Llama-2 chat model with a [LlamaCPP](/docs/integrations/llms/llamacpp) `LMM`, install the `llama-cpp-python` library using [these installation instructions](/docs/integrations/llms/llamacpp#installation). The following example uses a quantized [llama-2-7b-chat.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_0.gguf) model stored locally at `~/Models/llama-2-7b-chat.Q4_0.gguf`. \n",
|
||||
"\n",
|
||||
"After creating a `LlamaCpp` instance, the `llm` is again wrapped into `Llama2Chat`"
|
||||
]
|
||||
},
|
||||
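A minimal sketch of that wrapping step (the model path follows the example above; this assumes `Llama2Chat` is importable from `langchain_experimental`):

```python
from os.path import expanduser

from langchain_community.llms import LlamaCpp
from langchain_experimental.chat_models import Llama2Chat

# Quantized Llama-2 chat model stored locally, as described above.
model_path = expanduser("~/Models/llama-2-7b-chat.Q4_0.gguf")

llm = LlamaCpp(model_path=model_path)
model = Llama2Chat(llm=llm)
```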
{
"cell_type": "code",
"execution_count": 8,
"id": "07c0d04e",
"execution_count": null,
"id": "18d10bc3-ede6-4410-a867-7c623a0efdb8",
"metadata": {},
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"llama_model_loader: loaded meta data with 19 key-value pairs and 291 tensors from /home/martin/Models/llama-2-7b-chat.Q4_0.gguf (version GGUF V2)\n",
|
||||
"llama_model_loader: - tensor 0: token_embd.weight q4_0 [ 4096, 32000, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 1: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 2: blk.0.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 3: blk.0.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 4: blk.0.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 5: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 6: blk.0.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 7: blk.0.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 8: blk.0.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 9: blk.0.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 10: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 11: blk.1.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 12: blk.1.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 13: blk.1.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 14: blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 15: blk.1.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 16: blk.1.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 17: blk.1.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 18: blk.1.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 19: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 20: blk.10.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 21: blk.10.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 22: blk.10.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 23: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 24: blk.10.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 25: blk.10.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 26: blk.10.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 27: blk.10.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 28: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 29: blk.11.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 30: blk.11.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 31: blk.11.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 32: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 33: blk.11.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 34: blk.11.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 35: blk.11.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 36: blk.11.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 37: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 38: blk.12.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 39: blk.12.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 40: blk.12.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 41: blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 42: blk.12.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 43: blk.12.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 44: blk.12.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 45: blk.12.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 46: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 47: blk.13.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 48: blk.13.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 49: blk.13.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 50: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 51: blk.13.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 52: blk.13.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 53: blk.13.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 54: blk.13.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 55: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 56: blk.14.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 57: blk.14.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 58: blk.14.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 59: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 60: blk.14.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 61: blk.14.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 62: blk.14.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 63: blk.14.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 64: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 65: blk.15.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 66: blk.15.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 67: blk.15.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 68: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 69: blk.15.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 70: blk.15.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 71: blk.15.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 72: blk.15.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 73: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 74: blk.16.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 75: blk.16.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 76: blk.16.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 77: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 78: blk.16.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 79: blk.16.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 80: blk.16.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 81: blk.16.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 82: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 83: blk.17.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 84: blk.17.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 85: blk.17.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 86: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 87: blk.17.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 88: blk.17.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 89: blk.17.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 90: blk.17.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 91: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 92: blk.18.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 93: blk.18.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 94: blk.18.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 95: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 96: blk.18.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 97: blk.18.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 98: blk.18.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 99: blk.18.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 100: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 101: blk.19.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 102: blk.19.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 103: blk.19.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 104: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 105: blk.19.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 106: blk.19.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 107: blk.19.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 108: blk.19.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 109: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 110: blk.2.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 111: blk.2.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 112: blk.2.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 113: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 114: blk.2.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 115: blk.2.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 116: blk.2.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 117: blk.2.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 118: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 119: blk.20.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 120: blk.20.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 121: blk.20.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 122: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 123: blk.20.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 124: blk.20.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 125: blk.20.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 126: blk.20.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 127: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 128: blk.21.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 129: blk.21.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 130: blk.21.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 131: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 132: blk.21.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 133: blk.21.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 134: blk.21.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 135: blk.21.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 136: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 137: blk.22.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 138: blk.22.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 139: blk.22.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 140: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 141: blk.22.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 142: blk.22.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 143: blk.22.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 144: blk.22.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 145: blk.23.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 146: blk.23.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 147: blk.23.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 148: blk.23.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 149: blk.23.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 150: blk.23.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 151: blk.23.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 152: blk.23.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 153: blk.23.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 154: blk.3.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 155: blk.3.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 156: blk.3.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 157: blk.3.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 158: blk.3.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 159: blk.3.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 160: blk.3.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 161: blk.3.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 162: blk.3.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 163: blk.4.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 164: blk.4.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 165: blk.4.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 166: blk.4.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 167: blk.4.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 168: blk.4.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 169: blk.4.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 170: blk.4.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 171: blk.4.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 172: blk.5.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 173: blk.5.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 174: blk.5.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 175: blk.5.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 176: blk.5.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 177: blk.5.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 178: blk.5.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 179: blk.5.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 180: blk.5.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 181: blk.6.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 182: blk.6.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 183: blk.6.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 184: blk.6.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 185: blk.6.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 186: blk.6.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 187: blk.6.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 188: blk.6.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 189: blk.6.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 190: blk.7.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 191: blk.7.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 192: blk.7.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 193: blk.7.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 194: blk.7.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 195: blk.7.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 196: blk.7.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 197: blk.7.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 198: blk.7.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 199: blk.8.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 200: blk.8.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 201: blk.8.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 202: blk.8.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 203: blk.8.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 204: blk.8.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 205: blk.8.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 206: blk.8.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 207: blk.8.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 208: blk.9.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 209: blk.9.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 210: blk.9.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 211: blk.9.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 212: blk.9.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 213: blk.9.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 214: blk.9.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 215: blk.9.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 216: blk.9.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 217: output.weight q6_K [ 4096, 32000, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 218: blk.24.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 219: blk.24.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 220: blk.24.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 221: blk.24.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 222: blk.24.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 223: blk.24.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 224: blk.24.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 225: blk.24.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 226: blk.24.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 227: blk.25.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 228: blk.25.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 229: blk.25.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 230: blk.25.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 231: blk.25.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 232: blk.25.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 233: blk.25.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 234: blk.25.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 235: blk.25.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 236: blk.26.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 237: blk.26.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 238: blk.26.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 239: blk.26.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 240: blk.26.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 241: blk.26.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 242: blk.26.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 243: blk.26.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 244: blk.26.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 245: blk.27.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 246: blk.27.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 247: blk.27.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 248: blk.27.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 249: blk.27.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 250: blk.27.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 251: blk.27.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 252: blk.27.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 253: blk.27.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 254: blk.28.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 255: blk.28.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 256: blk.28.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 257: blk.28.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 258: blk.28.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 259: blk.28.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 260: blk.28.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 261: blk.28.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 262: blk.28.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 263: blk.29.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 264: blk.29.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 265: blk.29.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 266: blk.29.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 267: blk.29.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 268: blk.29.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 269: blk.29.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 270: blk.29.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 271: blk.29.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 272: blk.30.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 273: blk.30.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 274: blk.30.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 275: blk.30.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 276: blk.30.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 277: blk.30.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 278: blk.30.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 279: blk.30.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 280: blk.30.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 281: blk.31.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 282: blk.31.ffn_down.weight q4_0 [ 11008, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 283: blk.31.ffn_gate.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 284: blk.31.ffn_up.weight q4_0 [ 4096, 11008, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 285: blk.31.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 286: blk.31.attn_k.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 287: blk.31.attn_output.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 288: blk.31.attn_q.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 289: blk.31.attn_v.weight q4_0 [ 4096, 4096, 1, 1 ]\n",
|
||||
"llama_model_loader: - tensor 290: output_norm.weight f32 [ 4096, 1, 1, 1 ]\n",
|
||||
"llama_model_loader: - kv 0: general.architecture str \n",
|
||||
"llama_model_loader: - kv 1: general.name str \n",
|
||||
"llama_model_loader: - kv 2: llama.context_length u32 \n",
|
||||
"llama_model_loader: - kv 3: llama.embedding_length u32 \n",
|
||||
"llama_model_loader: - kv 4: llama.block_count u32 \n",
|
||||
"llama_model_loader: - kv 5: llama.feed_forward_length u32 \n",
|
||||
"llama_model_loader: - kv 6: llama.rope.dimension_count u32 \n",
|
||||
"llama_model_loader: - kv 7: llama.attention.head_count u32 \n",
|
||||
"llama_model_loader: - kv 8: llama.attention.head_count_kv u32 \n",
|
||||
"llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 \n",
|
||||
"llama_model_loader: - kv 10: general.file_type u32 \n",
|
||||
"llama_model_loader: - kv 11: tokenizer.ggml.model str \n",
|
||||
"llama_model_loader: - kv 12: tokenizer.ggml.tokens arr \n",
|
||||
"llama_model_loader: - kv 13: tokenizer.ggml.scores arr \n",
|
||||
"llama_model_loader: - kv 14: tokenizer.ggml.token_type arr \n",
|
||||
"llama_model_loader: - kv 15: tokenizer.ggml.bos_token_id u32 \n",
|
||||
"llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32 \n",
|
||||
"llama_model_loader: - kv 17: tokenizer.ggml.unknown_token_id u32 \n",
|
||||
"llama_model_loader: - kv 18: general.quantization_version u32 \n",
|
||||
"llama_model_loader: - type f32: 65 tensors\n",
|
||||
"llama_model_loader: - type q4_0: 225 tensors\n",
|
||||
"llama_model_loader: - type q6_K: 1 tensors\n",
|
||||
"llm_load_vocab: special tokens definition check successful ( 259/32000 ).\n",
|
||||
"llm_load_print_meta: format = GGUF V2\n",
|
||||
"llm_load_print_meta: arch = llama\n",
|
||||
"llm_load_print_meta: vocab type = SPM\n",
|
||||
"llm_load_print_meta: n_vocab = 32000\n",
|
||||
"llm_load_print_meta: n_merges = 0\n",
|
||||
"llm_load_print_meta: n_ctx_train = 4096\n",
|
||||
"llm_load_print_meta: n_embd = 4096\n",
|
||||
"llm_load_print_meta: n_head = 32\n",
|
||||
"llm_load_print_meta: n_head_kv = 32\n",
|
||||
"llm_load_print_meta: n_layer = 32\n",
|
||||
"llm_load_print_meta: n_rot = 128\n",
|
||||
"llm_load_print_meta: n_gqa = 1\n",
|
||||
"llm_load_print_meta: f_norm_eps = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_norm_rms_eps = 1.0e-06\n",
|
||||
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
|
||||
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
|
||||
"llm_load_print_meta: n_ff = 11008\n",
|
||||
"llm_load_print_meta: rope scaling = linear\n",
|
||||
"llm_load_print_meta: freq_base_train = 10000.0\n",
|
||||
"llm_load_print_meta: freq_scale_train = 1\n",
|
||||
"llm_load_print_meta: n_yarn_orig_ctx = 4096\n",
|
||||
"llm_load_print_meta: rope_finetuned = unknown\n",
|
||||
"llm_load_print_meta: model type = 7B\n",
|
||||
"llm_load_print_meta: model ftype = mostly Q4_0\n",
|
||||
"llm_load_print_meta: model params = 6.74 B\n",
|
||||
"llm_load_print_meta: model size = 3.56 GiB (4.54 BPW) \n",
|
||||
"llm_load_print_meta: general.name = LLaMA v2\n",
|
||||
"llm_load_print_meta: BOS token = 1 '<s>'\n",
|
||||
"llm_load_print_meta: EOS token = 2 '</s>'\n",
|
||||
"llm_load_print_meta: UNK token = 0 '<unk>'\n",
|
||||
"llm_load_print_meta: LF token = 13 '<0x0A>'\n",
|
||||
"llm_load_tensors: ggml ctx size = 0.11 MB\n",
|
||||
"llm_load_tensors: mem required = 3647.97 MB\n",
|
||||
"..................................................................................................\n",
|
||||
"llama_new_context_with_model: n_ctx = 512\n",
|
||||
"llama_new_context_with_model: freq_base = 10000.0\n",
|
||||
"llama_new_context_with_model: freq_scale = 1\n",
|
||||
"llama_new_context_with_model: kv self size = 256.00 MB\n",
|
||||
"llama_build_graph: non-view tensors processed: 740/740\n",
|
||||
"llama_new_context_with_model: compute buffer total size = 2.66 MB\n",
|
||||
"AVX = 1 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 1 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from os.path import expanduser\n",
|
||||
"\n",
|
||||
@@ -731,7 +368,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.11.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -65,6 +65,7 @@
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"llm = ChatMaritalk(\n",
"    model=\"sabia-2-medium\",  # Available models: sabia-2-small and sabia-2-medium\n",
"    api_key=\"\",  # Insert your API key here\n",
"    temperature=0.7,\n",
"    max_tokens=100,\n",
@@ -1005,7 +1005,7 @@
"id": "79efa62d"
},
"source": [
"Like any other integration, ChatNVIDIA supports chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mixtral_8x7b` model."
"Like any other integration, ChatNVIDIA supports chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](/docs/modules/memory/types/buffer) example applied to the `mixtral_8x7b` model."
]
},
{
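A minimal sketch of that pattern; the chain wiring below is the generic `ConversationChain` recipe rather than code quoted from this notebook:

```python
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_nvidia_ai_endpoints import ChatNVIDIA

# Wrap the chat model with a buffer memory so each turn sees the full history.
conversation = ConversationChain(
    llm=ChatNVIDIA(model="mixtral_8x7b"),
    memory=ConversationBufferMemory(),
)
print(conversation.invoke("Hi, I'm learning LangChain.")["response"])
```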
@@ -107,7 +107,7 @@
"\n",
"# using LangChain Expressive Language chain syntax\n",
"# learn more about the LCEL on\n",
"# https://python.langchain.com/docs/expression_language/why\n",
"# /docs/expression_language/why\n",
"chain = prompt | llm | StrOutputParser()\n",
"\n",
"# for brevity, response is printed in terminal\n",
@@ -235,7 +235,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Take a look at the [LangChain Expressive Language (LCEL) Interface](https://python.langchain.com/docs/expression_language/interface) for the other available interfaces for use when a chain is created.\n",
"Take a look at the [LangChain Expressive Language (LCEL) Interface](/docs/expression_language/interface) for the other available interfaces for use when a chain is created.\n",
"\n",
"## Building from source\n",
"\n",
@@ -250,7 +250,7 @@
" \n",
"Use the latest version of Ollama and supply the [`format`](https://github.com/jmorganca/ollama/blob/main/docs/api.md#json-mode) flag. The `format` flag will force the model to produce the response in JSON.\n",
"\n",
"> **Note:** You can also try out the experimental [OllamaFunctions](https://python.langchain.com/docs/integrations/chat/ollama_functions) wrapper for convenience."
"> **Note:** You can also try out the experimental [OllamaFunctions](/docs/integrations/chat/ollama_functions) wrapper for convenience."
]
},
{
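A minimal sketch of the `format` flag in use (the model name and prompt are illustrative):

```python
from langchain_community.chat_models import ChatOllama

# format="json" asks Ollama to constrain the response to valid JSON.
llm = ChatOllama(model="llama2", format="json")
print(llm.invoke("Return a JSON object with the capital of France.").content)
```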
286
docs/docs/integrations/chat/premai.ipynb
Normal file
@@ -0,0 +1,286 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"sidebar_label: PremAI\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# ChatPremAI\n",
"\n",
">[PremAI](https://app.premai.io) is a unified platform that lets you build powerful production-ready GenAI-powered applications with the least effort so that you can focus more on user experience and overall growth. \n",
"\n",
"\n",
"This example goes over how to use LangChain to interact with `ChatPremAI`. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Installation and setup\n",
"\n",
"We start by installing `langchain` and `premai-sdk`. Run the following command to install them:\n",
"\n",
"```bash\n",
"pip install premai langchain\n",
"```\n",
"\n",
"Before proceeding further, please make sure that you have made an account on PremAI and already started a project. If not, here's how you can start for free:\n",
"\n",
"1. Sign in to [PremAI](https://app.premai.io/accounts/login/) (create an account if this is your first visit) and create your API key [here](https://app.premai.io/api_keys/).\n",
"\n",
"2. Go to [app.premai.io](https://app.premai.io); this will take you to the project's dashboard.\n",
"\n",
"3. Create a project; this will generate a project ID (written as ID). This ID will help you to interact with your deployed application.\n",
"\n",
"4. Head over to LaunchPad (the one with the 🚀 icon) and deploy your model of choice there. The default model is `gpt-4`. You can also set and fix different generation parameters (like max tokens, temperature, etc.) and pre-set your system prompt.\n",
"\n",
"Congratulations on creating your first deployed application on PremAI 🎉 Now we can use LangChain to interact with our application."
]
},
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatPremAI\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup ChatPremAI instance in LangChain \n",
|
||||
"\n",
|
||||
"Once we import our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise, it will throw an error.\n",
|
||||
"\n",
|
||||
"To use langchain with prem, you do not need to pass any model name or set any parameters with our chat client. All of those will use the default model name and parameters of the LaunchPad model. \n",
|
||||
"\n",
|
||||
"`NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override existing default configurations. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# First step is to set up the env variable.\n",
|
||||
"# you can also pass the API key while instantiating the model but this\n",
|
||||
"# comes under a best practices to set it as env variable.\n",
|
||||
"\n",
|
||||
"if os.environ.get(\"PREMAI_API_KEY\") is None:\n",
|
||||
" os.environ[\"PREMAI_API_KEY\"] = getpass.getpass(\"PremAI API Key:\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# By default it will use the model which was deployed through the platform\n",
|
||||
"# in my case it will is \"claude-3-haiku\"\n",
|
||||
"\n",
|
||||
"chat = ChatPremAI(project_id=8)"
|
||||
]
|
||||
},
|
||||
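If you do want to override the LaunchPad defaults at client creation, here is a minimal sketch. The parameter names `model_name` and `temperature` are taken from the note above and are assumptions; confirm them against your installed `ChatPremAI` signature.

```python
# Hedged sketch: overriding LaunchPad defaults when creating the client.
# Parameter names are assumed from the note above, not verified.
chat = ChatPremAI(
    project_id=8,        # replace with your own project ID
    model_name="gpt-4",  # overrides the default LaunchPad model
    temperature=0.2,     # overrides the default temperature
)
```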
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Calling the Model\n",
|
||||
"\n",
|
||||
"Now you are all set. We can now start by interacting with our application. `ChatPremAI` supports two methods `invoke` (which is the same as `generate`) and `stream`. \n",
|
||||
"\n",
|
||||
"The first one will give us a static result. Whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions. \n",
|
||||
"\n",
|
||||
"### Generation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I am an artificial intelligence created by Anthropic. I'm here to help with a wide variety of tasks, from research and analysis to creative projects and open-ended conversation. I have general knowledge and capabilities, but I'm not a real person - I'm an AI assistant. Please let me know if you have any other questions!\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"human_message = HumanMessage(content=\"Who are you?\")\n",
|
||||
"\n",
|
||||
"response = chat.invoke([human_message])\n",
|
||||
"print(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Above looks interesting right? I set my default lanchpad system-prompt as: `Always sound like a pirate` You can also, override the default system prompt if you need to. Here's how you can do it. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"I am an artificial intelligence created by Anthropic. My purpose is to assist and converse with humans in a friendly and helpful way. I have a broad knowledge base that I can use to provide information, answer questions, and engage in discussions on a wide range of topics. Please let me know if you have any other questions - I'm here to help!\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"system_message = SystemMessage(content=\"You are a friendly assistant.\")\n",
|
||||
"human_message = HumanMessage(content=\"Who are you?\")\n",
|
||||
"\n",
|
||||
"chat.invoke([system_message, human_message])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also change generation parameters while calling the model. Here's how you can do that"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='I am an artificial intelligence created by Anthropic')"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat.invoke([system_message, human_message], temperature=0.7, max_tokens=10, top_p=0.95)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Important notes:\n",
|
||||
"\n",
|
||||
"Before proceeding further, please note that the current version of ChatPrem does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop) are not supported. \n",
|
||||
"\n",
|
||||
"We will provide support for those two above parameters in sooner versions. \n",
|
||||
"\n",
|
||||
"### Streaming\n",
|
||||
"\n",
|
||||
"And finally, here's how you do token streaming for dynamic chat like applications. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! As an AI language model, I don't have feelings or a physical state, but I'm functioning properly and ready to assist you with any questions or tasks you might have. How can I help you today?"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"for chunk in chat.stream(\"hello how are you\"):\n",
|
||||
" sys.stdout.write(chunk.content)\n",
|
||||
" sys.stdout.flush()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Similar to above, if you want to override the system-prompt and the generation parameters, here's how you can do it. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hello! As an AI language model, I don't have feelings or a physical form, but I'm functioning properly and ready to assist you. How can I help you today?"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import sys\n",
|
||||
"\n",
|
||||
"# For some experimental reasons if you want to override the system prompt then you\n",
|
||||
"# can pass that here too. However it is not recommended to override system prompt\n",
|
||||
"# of an already deployed model.\n",
|
||||
"\n",
|
||||
"for chunk in chat.stream(\n",
|
||||
" \"hello how are you\",\n",
|
||||
" system_prompt=\"act like a dog\",\n",
|
||||
" temperature=0.7,\n",
|
||||
" max_tokens=200,\n",
|
||||
"):\n",
|
||||
" sys.stdout.write(chunk.content)\n",
|
||||
" sys.stdout.flush()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -99,6 +99,36 @@
|
||||
"for chunk in chat.stream(\"Hello!\"):\n",
|
||||
" print(chunk.content, end=\"\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "566c85e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## For v2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3103ebdf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\"\"\"For basic init and call\"\"\"\n",
|
||||
"from langchain_community.chat_models import ChatSparkLLM\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"\n",
|
||||
"chat = ChatSparkLLM(\n",
|
||||
" spark_app_id=\"<app_id>\",\n",
|
||||
" spark_api_key=\"<api_key>\",\n",
|
||||
" spark_api_secret=\"<api_secret>\",\n",
|
||||
" spark_api_url=\"wss://spark-api.xf-yun.com/v2.1/chat\",\n",
|
||||
" spark_llm_domain=\"generalv2\",\n",
|
||||
")\n",
|
||||
"message = HumanMessage(content=\"Hello\")\n",
|
||||
"chat([message])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"answer = chat_model(\n",
|
||||
"answer = chat_model.invoke(\n",
|
||||
" [\n",
|
||||
" SystemMessage(\n",
|
||||
" content=\"You are a helpful assistant that translates English to French.\"\n",
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"cell_type": "raw",
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: YUAN2\n",
|
||||
"sidebar_label: Yuan2.0\n",
|
||||
"---"
|
||||
],
|
||||
"metadata": {
|
||||
@@ -22,7 +22,7 @@
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# YUAN2.0\n",
|
||||
"# Yuan2.0\n",
|
||||
"\n",
|
||||
"This notebook shows how to use [YUAN2 API](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/docs/inference_server.md) in LangChain with the langchain.chat_models.ChatYuan2.\n",
|
||||
"\n",
|
||||
@@ -96,9 +96,9 @@
|
||||
},
|
||||
"source": [
|
||||
"### Setting Up Your API server\n",
|
||||
"Setting up your OpenAI compatible API server following [yuan2 openai api server](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/README-EN.md).\n",
|
||||
"If you deployed api server locally, you can simply set `api_key=\"EMPTY\"` or anything you want.\n",
|
||||
"Just make sure, the `api_base` is set correctly."
|
||||
"Setting up your OpenAI compatible API server following [yuan2 openai api server](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/docs/Yuan2_fastchat.md).\n",
|
||||
"If you deployed api server locally, you can simply set `yuan2_api_key=\"EMPTY\"` or anything you want.\n",
|
||||
"Just make sure, the `yuan2_api_base` is set correctly."
|
||||
]
|
||||
},
|
||||
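As a rough sketch of wiring these settings together (the local URL is an assumption; adjust it to wherever your server listens):

```python
# Hedged sketch: pointing ChatYuan2 at a locally deployed,
# OpenAI-compatible API server. The URL below is an assumption.
from langchain_community.chat_models import ChatYuan2

chat = ChatYuan2(
    yuan2_api_base="http://127.0.0.1:8001/v1",  # your local server address
    yuan2_api_key="EMPTY",                      # any value works locally
)
```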
{
|
||||
@@ -187,7 +187,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(chat(messages))"
|
||||
"print(chat.invoke(messages))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -247,7 +247,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat(messages)"
|
||||
"chat.invoke(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte CDK"
|
||||
"# Airbyte CDK (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: `AirbyteCDKLoader` is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"A lot of source connectors are implemented using the [Airbyte CDK](https://docs.airbyte.com/connector-development/cdk-python/). This loader allows to run any of these connectors and return the data as documents."
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte Gong"
|
||||
"# Airbyte Gong (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: This connector-specific loader is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"This loader exposes the Gong connector as a document loader, allowing you to load various Gong objects as documents."
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte Hubspot"
|
||||
"# Airbyte Hubspot (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: `AirbyteHubspotLoader` is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"This loader exposes the Hubspot connector as a document loader, allowing you to load various Hubspot objects as documents."
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte JSON"
|
||||
"# Airbyte JSON (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: `AirbyteJSONLoader` is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases."
|
||||
]
|
||||
},
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte Salesforce"
|
||||
"# Airbyte Salesforce (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: This connector-specific loader is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"This loader exposes the Salesforce connector as a document loader, allowing you to load various Salesforce objects as documents."
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte Shopify"
|
||||
"# Airbyte Shopify (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: This connector-specific loader is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"This loader exposes the Shopify connector as a document loader, allowing you to load various Shopify objects as documents."
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte Stripe"
|
||||
"# Airbyte Stripe (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: This connector-specific loader is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"This loader exposes the Stripe connector as a document loader, allowing you to load various Stripe objects as documents."
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte Typeform"
|
||||
"# Airbyte Typeform (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: This connector-specific loader is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"This loader exposes the Typeform connector as a document loader, allowing you to load various Typeform objects as documents."
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "1f3a5ebf",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Airbyte Zendesk Support"
|
||||
"# Airbyte Zendesk Support (Deprecated)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -13,6 +13,8 @@
|
||||
"id": "35ac77b1-449b-44f7-b8f3-3494d55c286e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note: This connector-specific loader is deprecated. Please use [`AirbyteLoader`](/docs/integrations/document_loaders/airbyte) instead.\n",
|
||||
"\n",
|
||||
">[Airbyte](https://github.com/airbytehq/airbyte) is a data integration platform for ELT pipelines from APIs, databases & files to warehouses & lakes. It has the largest catalog of ELT connectors to data warehouses and databases.\n",
|
||||
"\n",
|
||||
"This loader exposes the Zendesk Support connector as a document loader, allowing you to load various objects as documents."
|
||||
|
||||
@@ -206,6 +206,42 @@
|
||||
"len(documents)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a56ba97505c8d140",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"## Sample 4\n",
|
||||
"\n",
|
||||
"You have the option to pass an additional parameter called `linearization_config` to the AmazonTextractPDFLoader which will determine how the the text output will be linearized by the parser after Textract runs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1efbc4b6-f3cb-45c5-bbe8-16e7df060b92",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.document_loaders import AmazonTextractPDFLoader\n",
|
||||
"from textractor.data.text_linearization_config import TextLinearizationConfig\n",
|
||||
"\n",
|
||||
"loader = AmazonTextractPDFLoader(\n",
|
||||
" \"s3://amazon-textract-public-content/langchain/layout-parser-paper.pdf\",\n",
|
||||
" linearization_config=TextLinearizationConfig(\n",
|
||||
" hide_header_layout=True,\n",
|
||||
" hide_footer_layout=True,\n",
|
||||
" hide_figure_layout=True,\n",
|
||||
" ),\n",
|
||||
")\n",
|
||||
"documents = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3e41b4d-b159-4274-89be-80d8159134ef",
|
||||
@@ -276,11 +312,14 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1a09d18b-ab7b-468e-ae66-f92abf666b9b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"cell_type": "markdown",
|
||||
"id": "bd97f1c90aff6a83",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
}
|
||||
},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
@@ -876,7 +915,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.10.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
"\n",
|
||||
"Headless mode means that the browser is running without a graphical user interface.\n",
|
||||
"\n",
|
||||
"`AsyncChromiumLoader` load the page, and then we use `Html2TextTransformer` to trasnform to text."
|
||||
"`AsyncChromiumLoader` loads the page, and then we use `Html2TextTransformer` to transform to text."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -24,7 +24,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet playwright beautifulsoup4\n",
|
||||
"! playwright install"
|
||||
"!playwright install"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -53,6 +53,27 @@
|
||||
"docs[0].page_content[0:100]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c64e7df9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you are using Jupyter notebooks, you might need to apply `nest_asyncio` before loading the documents."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5f2fe3c0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install nest-asyncio\n",
|
||||
"import nest_asyncio\n",
|
||||
"\n",
|
||||
"nest_asyncio.apply()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
"\n",
|
||||
">[Bilibili](https://www.bilibili.tv/) is one of the most beloved long-form video sites in China.\n",
|
||||
"\n",
|
||||
"This loader utilizes the [bilibili-api](https://github.com/MoyuScript/bilibili-api) to fetch the text transcript from `Bilibili`.\n",
|
||||
"This loader utilizes the [bilibili-api](https://github.com/Nemo2011/bilibili-api) to fetch the text transcript from `Bilibili`.\n",
|
||||
"\n",
|
||||
"With this BiliBiliLoader, users can easily obtain the transcript of their desired video content on the platform."
|
||||
]
|
||||
@@ -58,9 +58,6 @@
|
||||
"id": "3470dadf",
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"jupyter": {
|
||||
"outputs_hidden": false
|
||||
},
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)\n",
|
||||
"!poetry run pip install dgml-utils==0.3.0 --upgrade --quiet"
|
||||
"!poetry run pip install docugami-langchain dgml-utils==0.3.0 --upgrade --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -56,7 +56,7 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_community.document_loaders import DocugamiLoader"
|
||||
"from docugami_langchain.document_loaders import DocugamiLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -118,7 +118,7 @@
|
||||
"\n",
|
||||
"1. You can set min and max chunk size, which the system tries to adhere to with minimal truncation. You can set `loader.min_text_length` and `loader.max_text_length` to control these.\n",
|
||||
"2. By default, only the text for chunks is returned. However, Docugami's XML knowledge graph has additional rich information including semantic tags for entities inside the chunk. Set `loader.include_xml_tags = True` if you want the additional xml metadata on the returned chunks.\n",
|
||||
"3. In addition, you can set `loader.parent_hierarchy_levels` if you want Docugami to return parent chunks in the chunks it returns. The child chunks point to the parent chunks via the `loader.parent_id_key` value. This is useful e.g. with the [MultiVector Retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector) for [small-to-big](https://www.youtube.com/watch?v=ihSiRrOUwmg) retrieval. See detailed example later in this notebook."
|
||||
"3. In addition, you can set `loader.parent_hierarchy_levels` if you want Docugami to return parent chunks in the chunks it returns. The child chunks point to the parent chunks via the `loader.parent_id_key` value. This is useful e.g. with the [MultiVector Retriever](/docs/modules/data_connection/retrievers/multi_vector) for [small-to-big](https://www.youtube.com/watch?v=ihSiRrOUwmg) retrieval. See detailed example later in this notebook."
|
||||
]
|
||||
},
|
||||
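A minimal sketch of combining these knobs; the attribute names come from the list above, while the numeric values are illustrative assumptions:

```python
# Hedged sketch: tuning DocugamiLoader chunking. Values are illustrative.
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.min_text_length = 64         # smallest chunk size the system tries to honor
loader.max_text_length = 1024       # largest chunk size
loader.include_xml_tags = True      # include semantic XML metadata on chunks
loader.parent_hierarchy_levels = 2  # also return parent chunks for small-to-big retrieval
```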
{
|
||||
@@ -457,7 +457,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Documents are inherently semi-structured and the DocugamiLoader is able to navigate the semantic and structural contours of the document to provide parent chunk references on the chunks it returns. This is useful e.g. with the [MultiVector Retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector) for [small-to-big](https://www.youtube.com/watch?v=ihSiRrOUwmg) retrieval.\n",
|
||||
"Documents are inherently semi-structured and the DocugamiLoader is able to navigate the semantic and structural contours of the document to provide parent chunk references on the chunks it returns. This is useful e.g. with the [MultiVector Retriever](/docs/modules/data_connection/retrievers/multi_vector) for [small-to-big](https://www.youtube.com/watch?v=ihSiRrOUwmg) retrieval.\n",
|
||||
"\n",
|
||||
"To get parent chunk references, you can set `loader.parent_hierarchy_levels` to a non-zero value."
|
||||
]
|
||||
@@ -470,7 +470,7 @@
|
||||
"source": [
|
||||
"from typing import Dict, List\n",
|
||||
"\n",
|
||||
"from langchain_community.document_loaders import DocugamiLoader\n",
|
||||
"from docugami_langchain.document_loaders import DocugamiLoader\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"loader = DocugamiLoader(docset_id=\"zo954yqy53wp\")\n",
|
||||
@@ -655,7 +655,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
"version": "3.9.18"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -47,7 +47,7 @@
|
||||
"id": "04981332",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create a GeoPandas dataframe from [`Open City Data`](https://python.langchain.com/docs/integrations/document_loaders/open_city_data) as an example input."
|
||||
"Create a GeoPandas dataframe from [`Open City Data`](/docs/integrations/document_loaders/open_city_data) as an example input."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -10,7 +10,11 @@
|
||||
"\n",
|
||||
"> [AlloyDB](https://cloud.google.com/alloydb) is a fully managed relational database service that offers high performance, seamless integration, and impressive scalability. AlloyDB is 100% compatible with PostgreSQL. Extend your database application to build AI-powered experiences leveraging AlloyDB's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use `AlloyDB for PostgreSQL` to load Documents with the `AlloyDBLoader` class."
|
||||
"This notebook goes over how to use `AlloyDB for PostgreSQL` to load Documents with the `AlloyDBLoader` class.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-alloydb-pg-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-alloydb-pg-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -24,7 +28,7 @@
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
" * [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
" * [Enable the AlloyDB Admin API.](https://console.cloud.google.com/flows/enableapi?apiid=alloydb.googleapis.com)\n",
|
||||
" * [Enable the AlloyDB API](https://console.cloud.google.com/flows/enableapi?apiid=alloydb.googleapis.com)\n",
|
||||
" * [Create a AlloyDB cluster and instance.](https://cloud.google.com/alloydb/docs/cluster-create)\n",
|
||||
" * [Create a AlloyDB database.](https://cloud.google.com/alloydb/docs/quickstart/create-and-connect)\n",
|
||||
" * [Add a User to the database.](https://cloud.google.com/alloydb/docs/database-users/about)"
|
||||
@@ -139,30 +143,6 @@
|
||||
"! gcloud config set project {PROJECT_ID}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "rEWWNoNnKOgq",
|
||||
"metadata": {
|
||||
"id": "rEWWNoNnKOgq"
|
||||
},
|
||||
"source": [
|
||||
"### 💡 API Enablement\n",
|
||||
"The `langchain-google-alloydb-pg` package requires that you [enable the AlloyDB Admin API](https://console.cloud.google.com/flows/enableapi?apiid=alloydb.googleapis.com) in your Google Cloud Project."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5utKIdq7KYi5",
|
||||
"metadata": {
|
||||
"id": "5utKIdq7KYi5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# enable AlloyDB Admin API\n",
|
||||
"!gcloud services enable alloydb.googleapis.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f8f2830ee9ca1e01",
|
||||
|
||||
@@ -8,7 +8,9 @@
|
||||
"\n",
|
||||
"> [Bigtable](https://cloud.google.com/bigtable) is a key-value and wide-column store, ideal for fast access to structured, semi-structured, or unstructured data. Extend your database application to build AI-powered experiences leveraging Bigtable's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Bigtable](https://cloud.google.com/bigtable) to [save, load and delete langchain documents](https://python.langchain.com/docs/modules/data_connection/document_loaders/) with `BigtableLoader` and `BigtableSaver`.\n",
|
||||
"This notebook goes over how to use [Bigtable](https://cloud.google.com/bigtable) to [save, load and delete langchain documents](/docs/modules/data_connection/document_loaders/) with `BigtableLoader` and `BigtableSaver`.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-bigtable-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-bigtable-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
@@ -22,6 +24,7 @@
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Enable the Bigtable API](https://console.cloud.google.com/flows/enableapi?apiid=bigtable.googleapis.com)\n",
|
||||
"* [Create a Bigtable instance](https://cloud.google.com/bigtable/docs/creating-instance)\n",
|
||||
"* [Create a Bigtable table](https://cloud.google.com/bigtable/docs/managing-tables)\n",
|
||||
"* [Create Bigtable access credentials](https://developers.google.com/workspace/guides/create-credentials)\n",
|
||||
@@ -461,7 +464,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.11.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -4,11 +4,13 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Cloud SQL for SQL Server\n",
|
||||
"# Google Cloud SQL for SQL server\n",
|
||||
"\n",
|
||||
"> [Cloud SQL](https://cloud.google.com/sql) is a fully managed relational database service that offers high performance, seamless integration, and impressive scalability. It offers [MySQL](https://cloud.google.com/sql/mysql), [PostgreSQL](https://cloud.google.com/sql/postgres), and [SQL Server](https://cloud.google.com/sql/sqlserver) database engines. Extend your database application to build AI-powered experiences leveraging Cloud SQL's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Cloud SQL for SQL Server](https://cloud.google.com/sql/sqlserver) to [save, load and delete langchain documents](https://python.langchain.com/docs/modules/data_connection/document_loaders/) with `MSSQLLoader` and `MSSQLDocumentSaver`.\n",
|
||||
"This notebook goes over how to use [Cloud SQL for SQL server](https://cloud.google.com/sql/sqlserver) to [save, load and delete langchain documents](/docs/modules/data_connection/document_loaders/) with `MSSQLLoader` and `MSSQLDocumentSaver`.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-cloud-sql-mssql-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-cloud-sql-mssql-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
@@ -22,9 +24,10 @@
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Create a Cloud SQL for SQL Server instance](https://cloud.google.com/sql/docs/sqlserver/create-instance)\n",
|
||||
"* [Create a Cloud SQL database](https://cloud.google.com/sql/docs/mssql/create-manage-databases)\n",
|
||||
"* [Add an IAM database user to the database](https://cloud.google.com/sql/docs/sqlserver/add-manage-iam-users#creating-a-database-user) (Optional)\n",
|
||||
"* [Enable the Cloud SQL Admin API.](https://console.cloud.google.com/marketplace/product/google/sqladmin.googleapis.com)\n",
|
||||
"* [Create a Cloud SQL for SQL server instance](https://cloud.google.com/sql/docs/sqlserver/create-instance)\n",
|
||||
"* [Create a Cloud SQL database](https://cloud.google.com/sql/docs/sqlserver/create-manage-databases)\n",
|
||||
"* [Add an IAM database user to the database](https://cloud.google.com/sql/docs/sqlserver/create-manage-users) (Optional)\n",
|
||||
"\n",
|
||||
"After confirmed access to database in the runtime environment of this notebook, filling the following values and run the cell before running example scripts."
|
||||
]
|
||||
@@ -170,7 +173,7 @@
|
||||
"\n",
|
||||
"Before saving or loading documents from MSSQL table, we need first configures a connection pool to Cloud SQL database. The `MSSQLEngine` configures a [SQLAlchemy connection pool](https://docs.sqlalchemy.org/en/20/core/pooling.html#module-sqlalchemy.pool) to your Cloud SQL database, enabling successful connections from your application and following industry best practices.\n",
|
||||
"\n",
|
||||
"To create a `MSSQLEngine` using `MSSQLEngine.from_instance()` you need to provide only 6 things:\n",
|
||||
"To create a `MSSQLEngine` using `MSSQLEngine.from_instance()` you need to provide only 4 things:\n",
|
||||
"\n",
|
||||
"1. `project_id` : Project ID of the Google Cloud Project where the Cloud SQL instance is located.\n",
|
||||
"1. `region` : Region where the Cloud SQL instance is located.\n",
|
||||
@@ -205,6 +208,7 @@
|
||||
"### Initialize a table\n",
|
||||
"\n",
|
||||
"Initialize a table of default schema via `MSSQLEngine.init_document_table(<table_name>)`. Table Columns:\n",
|
||||
"\n",
|
||||
"- page_content (type: text)\n",
|
||||
"- langchain_metadata (type: JSON)\n",
|
||||
"\n",
|
||||
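For example, a one-line sketch of creating that default table (the table name is illustrative):

```python
# Hedged sketch: creates a table with page_content (text) and
# langchain_metadata (JSON) columns.
engine.init_document_table("my_doc_table")
```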
@@ -227,6 +231,7 @@
|
||||
"### Save documents\n",
|
||||
"\n",
|
||||
"Save langchain documents with `MSSQLDocumentSaver.add_documents(<documents>)`. To initialize `MSSQLDocumentSaver` class you need to provide 2 things:\n",
|
||||
"\n",
|
||||
"1. `engine` - An instance of a `MSSQLEngine` engine.\n",
|
||||
"2. `table_name` - The name of the table within the Cloud SQL database to store langchain documents."
|
||||
]
|
||||
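A minimal sketch built from those two arguments; the document contents and import path are illustrative assumptions:

```python
# Hedged sketch: saving langchain documents with MSSQLDocumentSaver.
from langchain_core.documents import Document
from langchain_google_cloud_sql_mssql import MSSQLDocumentSaver  # path assumed

saver = MSSQLDocumentSaver(engine=engine, table_name="my_doc_table")
saver.add_documents(
    [Document(page_content="Apple Granny Smith", metadata={"fruit_id": 1})]
)
```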
@@ -270,6 +275,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load langchain documents with `MSSQLLoader.load()` or `MSSQLLoader.lazy_load()`. `lazy_load` returns a generator that only queries database during the iteration. To initialize `MSSQLDocumentSaver` class you need to provide:\n",
|
||||
"\n",
|
||||
"1. `engine` - An instance of a `MSSQLEngine` engine.\n",
|
||||
"2. `table_name` - The name of the table within the Cloud SQL database to store langchain documents."
|
||||
]
|
||||
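A matching sketch for loading (same assumed import path as above):

```python
# Hedged sketch: loading documents back out of the table.
from langchain_google_cloud_sql_mssql import MSSQLLoader  # path assumed

loader = MSSQLLoader(engine=engine, table_name="my_doc_table")
docs = loader.load()  # load everything at once
# for doc in loader.lazy_load():  # or iterate lazily, querying per iteration
#     print(doc)
```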
@@ -341,6 +347,7 @@
|
||||
"For table with default schema (page_content, langchain_metadata), the deletion criteria is:\n",
|
||||
"\n",
|
||||
"A `row` should be deleted if there exists a `document` in the list, such that\n",
|
||||
"\n",
|
||||
"- `document.page_content` equals `row[page_content]`\n",
|
||||
"- `document.metadata` equals `row[langchain_metadata]`"
|
||||
]
|
||||
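Deletion is therefore typically a load-then-delete round trip, as in this sketch:

```python
# Hedged sketch: delete the rows whose page_content and metadata
# match the given documents.
docs = loader.load()
saver.delete(docs)
```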
@@ -448,6 +455,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can specify the content and metadata we want to load by setting the `content_columns` and `metadata_columns` when initializing the `MSSQLLoader`.\n",
|
||||
"\n",
|
||||
"1. `content_columns`: The columns to write into the `page_content` of the document.\n",
|
||||
"2. `metadata_columns`: The columns to write into the `metadata` of the document.\n",
|
||||
"\n",
|
||||
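A sketch of such a selective loader; the column names are illustrative:

```python
# Hedged sketch: choose which columns feed page_content vs. metadata.
loader = MSSQLLoader(
    engine=engine,
    table_name="my_doc_table",
    content_columns=["description"],             # -> page_content
    metadata_columns=["fruit_name", "organic"],  # -> metadata
)
```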
@@ -486,12 +494,14 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In order to save langchain document into table with customized metadata fields. We need first create such a table via `MSSQLEngine.init_document_table()`, and specify the list of `metadata_columns` we want it to have. In this example, the created table will have table columns:\n",
|
||||
"\n",
|
||||
"- description (type: text): for storing fruit description.\n",
|
||||
"- fruit_name (type text): for storing fruit name.\n",
|
||||
"- organic (type tinyint(1)): to tell if the fruit is organic.\n",
|
||||
"- other_metadata (type: JSON): for storing other metadata information of the fruit.\n",
|
||||
"\n",
|
||||
"We can use the following parameters with `MSSQLEngine.init_document_table()` to create the table:\n",
|
||||
"\n",
|
||||
"1. `table_name`: The name of the table within the Cloud SQL database to store langchain documents.\n",
|
||||
"2. `metadata_columns`: A list of `sqlalchemy.Column` indicating the list of metadata columns we need.\n",
|
||||
"3. `content_column`: The name of column to store `page_content` of langchain document. Default: `page_content`.\n",
|
||||
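A sketch of that call, with the metadata columns expressed as `sqlalchemy.Column` objects as described above (the SQLAlchemy column types are assumptions matching the listed SQL types):

```python
# Hedged sketch: create a table with customized metadata columns.
import sqlalchemy

engine.init_document_table(
    "fruits_table",
    metadata_columns=[
        sqlalchemy.Column("fruit_name", sqlalchemy.UnicodeText, nullable=True),
        sqlalchemy.Column("organic", sqlalchemy.Boolean, nullable=True),
    ],
    content_column="description",  # page_content column, replacing the default
)
```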
@@ -531,6 +541,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Save documents with `MSSQLDocumentSaver.add_documents(<documents>)`. As you can see in this example, \n",
|
||||
"\n",
|
||||
"- `document.page_content` will be saved into `description` column.\n",
|
||||
"- `document.metadata.fruit_name` will be saved into `fruit_name` column.\n",
|
||||
"- `document.metadata.organic` will be saved into `organic` column.\n",
|
||||
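Putting that mapping into code, a sketch of the documents being saved (field values are illustrative):

```python
# Hedged sketch: metadata keys that match table columns land in those
# columns; remaining keys would go into the JSON metadata column.
from langchain_core.documents import Document

saver = MSSQLDocumentSaver(engine=engine, table_name="fruits_table")
saver.add_documents(
    [
        Document(
            page_content="A crisp green apple.",             # -> description
            metadata={"fruit_name": "Apple", "organic": 1},  # -> columns
        )
    ]
)
```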
@@ -584,6 +595,7 @@
|
||||
"We can also delete documents from table with customized metadata columns via `MSSQLDocumentSaver.delete(<documents>)`. The deletion criteria is:\n",
|
||||
"\n",
|
||||
"A `row` should be deleted if there exists a `document` in the list, such that\n",
|
||||
"\n",
|
||||
"- `document.page_content` equals `row[page_content]`\n",
|
||||
"- For every metadata field `k` in `document.metadata`\n",
|
||||
" - `document.metadata[k]` equals `row[k]` or `document.metadata[k]` equals `row[langchain_metadata][k]`\n",
|
||||
|
||||
@@ -6,9 +6,11 @@
|
||||
"source": [
|
||||
"# Google Cloud SQL for MySQL\n",
|
||||
"\n",
|
||||
"> [Cloud SQL](https://cloud.google.com/sql) is a fully managed relational database service that offers high performance, seamless integration, and impressive scalability. It offers [MySQL](https://cloud.google.com/sql/mysql), [PostgreSQL](https://cloud.google.com/sql/postgres), and [SQL Server](https://cloud.google.com/sql/sqlserver) database engines. Extend your database application to build AI-powered experiences leveraging Cloud SQL's Langchain integrations.\n",
|
||||
"> [Cloud SQL](https://cloud.google.com/sql) is a fully managed relational database service that offers high performance, seamless integration, and impressive scalability. It offers [MySQL](https://cloud.google.com/sql/mysql), [PostgreSQL](https://cloud.google.com/sql/postgresql), and [SQL Server](https://cloud.google.com/sql/sqlserver) database engines. Extend your database application to build AI-powered experiences leveraging Cloud SQL's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Cloud SQL for MySQL](https://cloud.google.com/sql/mysql) to [save, load and delete langchain documents](https://python.langchain.com/docs/modules/data_connection/document_loaders/) with `MySQLLoader` and `MySQLDocumentSaver`.\n",
|
||||
"This notebook goes over how to use [Cloud SQL for MySQL](https://cloud.google.com/sql/mysql) to [save, load and delete langchain documents](/docs/modules/data_connection/document_loaders/) with `MySQLLoader` and `MySQLDocumentSaver`.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-cloud-sql-mysql-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-cloud-sql-mysql-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
@@ -22,6 +24,7 @@
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Enable the Cloud SQL Admin API.](https://console.cloud.google.com/marketplace/product/google/sqladmin.googleapis.com)\n",
|
||||
"* [Create a Cloud SQL for MySQL instance](https://cloud.google.com/sql/docs/mysql/create-instance)\n",
|
||||
"* [Create a Cloud SQL database](https://cloud.google.com/sql/docs/mysql/create-manage-databases)\n",
|
||||
"* [Add an IAM database user to the database](https://cloud.google.com/sql/docs/mysql/add-manage-iam-users#creating-a-database-user) (Optional)\n",
|
||||
@@ -137,24 +140,6 @@
|
||||
"auth.authenticate_user()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### API Enablement\n",
|
||||
"The `langchain-google-cloud-sql-mysql` package requires that you [enable the Cloud SQL Admin API](https://console.cloud.google.com/flows/enableapi?apiid=sqladmin.googleapis.com) in your Google Cloud Project."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# enable Cloud SQL Admin API\n",
|
||||
"!gcloud services enable sqladmin.googleapis.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -389,7 +374,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"First we prepare an example table with non-default schema, and populate it with some arbitary data."
|
||||
"First we prepare an example table with non-default schema, and populate it with some arbitrary data."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,382 +1,362 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "E_RJy7C1bpCT"
|
||||
},
|
||||
"source": [
|
||||
"# Google Cloud SQL for PostgreSQL\n",
|
||||
"\n",
|
||||
"> [Cloud SQL for PostgreSQL](https://cloud.google.com/sql/docs/postgres) is a fully-managed database service that helps you set up, maintain, manage, and administer your PostgreSQL relational databases on Google Cloud Platform. Extend your database application to build AI-powered experiences leveraging Cloud SQL for PostgreSQL's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use `Cloud SQL for PostgreSQL` to load Documents with the `PostgreSQLLoader` class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "xjcxaw6--Xyy"
|
||||
},
|
||||
"source": [
|
||||
"## Before you begin\n",
|
||||
"\n",
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
" * [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
" * [Enable the Cloud SQL Admin API.](https://console.cloud.google.com/marketplace/product/google/sqladmin.googleapis.com)\n",
|
||||
" * [Create a Cloud SQL for PostgreSQL instance.](https://cloud.google.com/sql/docs/postgres/create-instance)\n",
|
||||
" * [Create a Cloud SQL for PostgreSQL database.](https://cloud.google.com/sql/docs/postgres/create-manage-databases)\n",
|
||||
" * [Add a User to the database.](https://cloud.google.com/sql/docs/postgres/create-manage-users)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "IR54BmgvdHT_"
|
||||
},
|
||||
"source": [
|
||||
"### 🦜🔗 Library Installation\n",
|
||||
"Install the integration library, `langchain-google-cloud-sql-pg`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 1000
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "E_RJy7C1bpCT"
|
||||
},
|
||||
"source": [
|
||||
"# Google Cloud SQL for PostgreSQL\n",
|
||||
"\n",
|
||||
"> [Cloud SQL for PostgreSQL](https://cloud.google.com/sql/docs/postgres) is a fully-managed database service that helps you set up, maintain, manage, and administer your PostgreSQL relational databases on Google Cloud Platform. Extend your database application to build AI-powered experiences leveraging Cloud SQL for PostgreSQL's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use `Cloud SQL for PostgreSQL` to load Documents with the `PostgresLoader` class.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-cloud-sql-pg-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-cloud-sql-pg-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
},
|
||||
"id": "0ZITIDE160OD",
|
||||
"outputId": "90e0636e-ff34-4e1e-ad37-d2a6db4a317e"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain-google-cloud-sql-pg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "v40bB_GMcr9f"
|
||||
},
|
||||
"source": [
|
||||
"**Colab only:** Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "6o0iGVIdDD6K"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
|
||||
"# import IPython\n",
|
||||
"\n",
|
||||
"# app = IPython.Application.instance()\n",
|
||||
"# app.kernel.do_shutdown(True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cTXTbj4UltKf"
|
||||
},
|
||||
"source": [
|
||||
"### 🔐 Authentication\n",
|
||||
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
|
||||
"\n",
|
||||
"* If you are using Colab to run this notebook, use the cell below and continue.\n",
|
||||
"* If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.colab import auth\n",
|
||||
"\n",
|
||||
"auth.authenticate_user()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Uj02bMRAc9_c"
|
||||
},
|
||||
"source": [
|
||||
"### ☁ Set Your Google Cloud Project\n",
|
||||
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
|
||||
"\n",
|
||||
"If you don't know your project ID, try the following:\n",
|
||||
"\n",
|
||||
"* Run `gcloud config list`.\n",
|
||||
"* Run `gcloud projects list`.\n",
|
||||
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "xjcxaw6--Xyy"
|
||||
},
|
||||
"source": [
|
||||
"## Before you begin\n",
|
||||
"\n",
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
" * [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
" * [Enable the Cloud SQL Admin API.](https://console.cloud.google.com/marketplace/product/google/sqladmin.googleapis.com)\n",
|
||||
" * [Create a Cloud SQL for PostgreSQL instance.](https://cloud.google.com/sql/docs/postgres/create-instance)\n",
|
||||
" * [Create a Cloud SQL for PostgreSQL database.](https://cloud.google.com/sql/docs/postgres/create-manage-databases)\n",
|
||||
" * [Add a User to the database.](https://cloud.google.com/sql/docs/postgres/create-manage-users)"
|
||||
]
|
||||
},
|
||||
"id": "wnp1R1PYc9_c",
|
||||
"outputId": "6502c721-a2fd-451f-b946-9f7b850d5966"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Project { display-mode: \"form\" }\n",
|
||||
"PROJECT_ID = \"gcp_project_id\" # @param {type:\"string\"}\n",
|
||||
"\n",
|
||||
"# Set the project id\n",
|
||||
"! gcloud config set project {PROJECT_ID}"
|
||||
]
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "IR54BmgvdHT_"
|
||||
},
|
||||
"source": [
|
||||
"### 🦜🔗 Library Installation\n",
|
||||
"Install the integration library, `langchain_google_cloud_sql_pg`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 1000
|
||||
},
|
||||
"id": "0ZITIDE160OD",
|
||||
"outputId": "90e0636e-ff34-4e1e-ad37-d2a6db4a317e"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade --quiet langchain_google_cloud_sql_pg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "v40bB_GMcr9f"
|
||||
},
|
||||
"source": [
|
||||
"**Colab only:** Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "6o0iGVIdDD6K"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
|
||||
"# import IPython\n",
|
||||
"\n",
|
||||
"# app = IPython.Application.instance()\n",
|
||||
"# app.kernel.do_shutdown(True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "cTXTbj4UltKf"
|
||||
},
|
||||
"source": [
|
||||
"### 🔐 Authentication\n",
|
||||
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
|
||||
"\n",
|
||||
"* If you are using Colab to run this notebook, use the cell below and continue.\n",
|
||||
"* If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.colab import auth\n",
|
||||
"\n",
|
||||
"auth.authenticate_user()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Uj02bMRAc9_c"
|
||||
},
|
||||
"source": [
|
||||
"### ☁ Set Your Google Cloud Project\n",
|
||||
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
|
||||
"\n",
|
||||
"If you don't know your project ID, try the following:\n",
|
||||
"\n",
|
||||
"* Run `gcloud config list`.\n",
|
||||
"* Run `gcloud projects list`.\n",
|
||||
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "wnp1R1PYc9_c",
|
||||
"outputId": "6502c721-a2fd-451f-b946-9f7b850d5966"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Project { display-mode: \"form\" }\n",
|
||||
"PROJECT_ID = \"gcp_project_id\" # @param {type:\"string\"}\n",
|
||||
"\n",
|
||||
"# Set the project id\n",
|
||||
"! gcloud config set project {PROJECT_ID}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f8f2830ee9ca1e01",
|
||||
"metadata": {
|
||||
"id": "f8f2830ee9ca1e01"
|
||||
},
|
||||
"source": [
|
||||
"## Basic Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "OMvzMWRrR6n7",
|
||||
"metadata": {
|
||||
"id": "OMvzMWRrR6n7"
|
||||
},
|
||||
"source": [
|
||||
"### Set Cloud SQL database values\n",
|
||||
"Find your database variables, in the [Cloud SQL Instances page](https://console.cloud.google.com/sql/instances)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "irl7eMFnSPZr",
|
||||
"metadata": {
|
||||
"id": "irl7eMFnSPZr"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @title Set Your Values Here { display-mode: \"form\" }\n",
|
||||
"REGION = \"us-central1\" # @param {type: \"string\"}\n",
|
||||
"INSTANCE = \"my-primary\" # @param {type: \"string\"}\n",
|
||||
"DATABASE = \"my-database\" # @param {type: \"string\"}\n",
|
||||
"TABLE_NAME = \"vector_store\" # @param {type: \"string\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "QuQigs4UoFQ2",
|
||||
"metadata": {
|
||||
"id": "QuQigs4UoFQ2"
|
||||
},
|
||||
"source": [
|
||||
"### Cloud SQL Engine\n",
|
||||
"\n",
|
||||
"One of the requirements and arguments to establish PostgreSQL as a document loader is a `PostgresEngine` object. The `PostgresEngine` configures a connection pool to your Cloud SQL for PostgreSQL database, enabling successful connections from your application and following industry best practices.\n",
|
||||
"\n",
|
||||
"To create a `PostgresEngine` using `PostgresEngine.from_instance()` you need to provide only 4 things:\n",
|
||||
"\n",
|
||||
"1. `project_id` : Project ID of the Google Cloud Project where the Cloud SQL instance is located.\n",
|
||||
"1. `region` : Region where the Cloud SQL instance is located.\n",
|
||||
"1. `instance` : The name of the Cloud SQL instance.\n",
|
||||
"1. `database` : The name of the database to connect to on the Cloud SQL instance.\n",
|
||||
"\n",
|
||||
"By default, [IAM database authentication](https://cloud.google.com/sql/docs/postgres/iam-authentication) will be used as the method of database authentication. This library uses the IAM principal belonging to the [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials) sourced from the environment.\n",
|
||||
"\n",
|
||||
"Optionally, [built-in database authentication](https://cloud.google.com/sql/docs/postgres/users) using a username and password to access the Cloud SQL database can also be used. Just provide the optional `user` and `password` arguments to `PostgresEngine.from_instance()`:\n",
|
||||
"\n",
|
||||
"* `user` : Database user to use for built-in database authentication and login\n",
|
||||
"* `password` : Database password to use for built-in database authentication and login.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Note**: This tutorial demonstrates the async interface. All async methods have corresponding sync methods."
|
||||
]
|
||||
},
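{
"cell_type": "markdown",
"metadata": {},
"source": [
"For instance, a sketch of the equivalent sync calls (`create_sync` is assumed here from the library's async/sync naming convention):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: sync counterparts of the async methods used below.\n",
"# engine = PostgresEngine.from_instance(\n",
"#     project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE\n",
"# )\n",
"# loader = PostgresLoader.create_sync(engine, table_name=TABLE_NAME)\n",
"# docs = loader.load()"
]
},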
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_cloud_sql_pg import PostgresEngine\n",
|
||||
"\n",
|
||||
"engine = await PostgresEngine.afrom_instance(\n",
|
||||
" project_id=PROJECT_ID,\n",
|
||||
" region=REGION,\n",
|
||||
" instance=INSTANCE,\n",
|
||||
" database=DATABASE,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "e1tl0aNx7SWy"
|
||||
},
|
||||
"source": [
|
||||
"### Create PostgresLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "z-AZyzAQ7bsf"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_cloud_sql_pg import PostgresLoader\n",
|
||||
"\n",
|
||||
"# Creating a basic PostgreSQL object\n",
|
||||
"loader = await PostgresLoader.create(engine, table_name=TABLE_NAME)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "PeOMpftjc9_e"
|
||||
},
|
||||
"source": [
|
||||
"### Load Documents via default table\n",
|
||||
"The loader returns a list of Documents from the table using the first column as page_content and all other columns as metadata. The default table will have the first column as\n",
|
||||
"page_content and the second column as metadata (JSON). Each row becomes a document. Please note that if you want your documents to have ids you will need to add them in."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "cwvi_O5Wc9_e"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_cloud_sql_pg import PostgresLoader\n",
|
||||
"\n",
|
||||
"# Creating a basic PostgresLoader object\n",
|
||||
"loader = await PostgresLoader.create(engine, table_name=TABLE_NAME)\n",
|
||||
"\n",
|
||||
"docs = await loader.aload()\n",
|
||||
"print(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "kSkL9l1Hc9_e"
|
||||
},
|
||||
"source": [
|
||||
"### Load documents via custom table/metadata or custom page content columns"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = await PostgresLoader.create(\n",
|
||||
" engine,\n",
|
||||
" table_name=TABLE_NAME,\n",
|
||||
" content_columns=[\"product_name\"], # Optional\n",
|
||||
" metadata_columns=[\"id\"], # Optional\n",
|
||||
")\n",
|
||||
"docs = await loader.aload()\n",
|
||||
"print(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "5R6h0_Cvc9_f"
|
||||
},
|
||||
"source": [
|
||||
"### Set page content format\n",
|
||||
"The loader returns a list of Documents, with one document per row, with page content in specified string format, i.e. text (space separated concatenation), JSON, YAML, CSV, etc. JSON and YAML formats include headers, while text and CSV do not include field headers.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "NGNdS7cqc9_f"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = await PostgresLoader.create(\n",
|
||||
" engine,\n",
|
||||
" table_name=\"products\",\n",
|
||||
" content_columns=[\"product_name\", \"description\"],\n",
|
||||
" format=\"YAML\",\n",
|
||||
")\n",
|
||||
"docs = await loader.aload()\n",
|
||||
"print(docs)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"toc_visible": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "rEWWNoNnKOgq",
|
||||
"metadata": {
|
||||
"id": "rEWWNoNnKOgq"
|
||||
},
|
||||
"source": [
|
||||
"### 💡 API Enablement\n",
|
||||
"The `langchain_google_cloud_sql_pg` package requires that you [enable the Cloud SQL Admin API](https://console.cloud.google.com/flows/enableapi?apiid=sqladmin.googleapis.com) in your Google Cloud Project."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "5utKIdq7KYi5",
|
||||
"metadata": {
|
||||
"id": "5utKIdq7KYi5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# enable Cloud SQL Admin API\n",
|
||||
"!gcloud services enable sqladmin.googleapis.com"
|
||||
]
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,411 +1,336 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Firestore in Datastore mode\n",
|
||||
"\n",
|
||||
"> [Firestore in Datastore mode](https://cloud.google.com/datastore) is a serverless document-oriented database that scales to meet any demand. Extend your database application to build AI-powered experiences leveraging Datastore's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Firestore in Datastore mode](https://cloud.google.com/datastore) to [save, load and delete langchain documents](https://python.langchain.com/docs/modules/data_connection/document_loaders/) with `DatastoreLoader` and `DatastoreSaver`.\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-datastore-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Google Firestore in Datastore Mode\n",
|
||||
"\n",
|
||||
"> [Firestore in Datastore Mode](https://cloud.google.com/datastore) is a NoSQL document database built for automatic scaling, high performance and ease of application development. Extend your database application to build AI-powered experiences leveraging Datastore's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Firestore in Datastore Mode](https://cloud.google.com/datastore) to [save, load and delete langchain documents](/docs/modules/data_connection/document_loaders/) with `DatastoreLoader` and `DatastoreSaver`.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-datastore-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-datastore-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Before You Begin\n",
|
||||
"\n",
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Enable the Datastore API](https://console.cloud.google.com/flows/enableapi?apiid=datastore.googleapis.com)\n",
|
||||
"* [Create a Firestore in Datastore Mode database](https://cloud.google.com/datastore/docs/manage-databases)\n",
|
||||
"\n",
|
||||
"After confirmed access to database in the runtime environment of this notebook, filling the following values and run the cell before running example scripts."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 🦜🔗 Library Installation\n",
|
||||
"\n",
|
||||
"The integration lives in its own `langchain-google-datastore` package, so we need to install it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -upgrade --quiet langchain-google-datastore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Colab only**: Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
|
||||
"# import IPython\n",
|
||||
"\n",
|
||||
"# app = IPython.Application.instance()\n",
|
||||
"# app.kernel.do_shutdown(True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ☁ Set Your Google Cloud Project\n",
|
||||
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
|
||||
"\n",
|
||||
"If you don't know your project ID, try the following:\n",
|
||||
"\n",
|
||||
"* Run `gcloud config list`.\n",
|
||||
"* Run `gcloud projects list`.\n",
|
||||
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @markdown Please fill in the value below with your Google Cloud project ID and then run the cell.\n",
|
||||
"\n",
|
||||
"PROJECT_ID = \"my-project-id\" # @param {type:\"string\"}\n",
|
||||
"\n",
|
||||
"# Set the project id\n",
|
||||
"!gcloud config set project {PROJECT_ID}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 🔐 Authentication\n",
|
||||
"\n",
|
||||
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
|
||||
"\n",
|
||||
"- If you are using Colab to run this notebook, use the cell below and continue.\n",
|
||||
"- If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.colab import auth\n",
|
||||
"\n",
|
||||
"auth.authenticate_user()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Save documents\n",
|
||||
"\n",
|
||||
"Save langchain documents with `DatastoreSaver.upsert_documents(<documents>)`. By default it will try to extract the entity key from the `key` in the Document metadata."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_google_datastore import DatastoreSaver\n",
|
||||
"\n",
|
||||
"saver = DatastoreSaver()\n",
|
||||
"\n",
|
||||
"data = [Document(page_content=\"Hello, World!\")]\n",
|
||||
"saver.upsert_documents(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Save documents without key\n",
|
||||
"\n",
|
||||
"If a `kind` is specified the documents will be stored with an auto generated id."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"saver = DatastoreSaver(\"MyKind\")\n",
|
||||
"\n",
|
||||
"saver.upsert_documents(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load documents via Kind\n",
|
||||
"\n",
|
||||
"Load langchain documents with `DatastoreLoader.load()` or `DatastoreLoader.lazy_load()`. `lazy_load` returns a generator that only queries database during the iteration. To initialize `DatastoreLoader` class you need to provide:\n",
|
||||
"1. `source` - The source to load the documents. It can be an instance of Query or the name of the Datastore kind to read from."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_datastore import DatastoreLoader\n",
|
||||
"\n",
|
||||
"loader = DatastoreLoader(\"MyKind\")\n",
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load documents via query\n",
|
||||
"\n",
|
||||
"Other than loading documents from kind, we can also choose to load documents from query. For example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.cloud import datastore\n",
|
||||
"\n",
|
||||
"client = datastore.Client(database=\"non-default-db\", namespace=\"custom_namespace\")\n",
|
||||
"query_load = client.query(kind=\"MyKind\")\n",
|
||||
"query_load.add_filter(\"region\", \"=\", \"west_coast\")\n",
|
||||
"\n",
|
||||
"loader_document = DatastoreLoader(query_load)\n",
|
||||
"\n",
|
||||
"data = loader_document.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete documents\n",
|
||||
"\n",
|
||||
"Delete a list of langchain documents from Datastore with `DatastoreSaver.delete_documents(<documents>)`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"saver = DatastoreSaver()\n",
|
||||
"\n",
|
||||
"saver.delete_documents(data)\n",
|
||||
"\n",
|
||||
"keys_to_delete = [\n",
|
||||
" [\"Kind1\", \"identifier\"],\n",
|
||||
" [\"Kind2\", 123],\n",
|
||||
" [\"Kind3\", \"identifier\", \"NestedKind\", 456],\n",
|
||||
"]\n",
|
||||
"# The Documents will be ignored and only the document ids will be used.\n",
|
||||
"saver.delete_documents(data, keys_to_delete)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Advanced Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load documents with customized document page content & metadata\n",
|
||||
"\n",
|
||||
"The arguments of `page_content_properties` and `metadata_properties` will specify the Entity properties to be written into LangChain Document `page_content` and `metadata`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = DatastoreLoader(\n",
|
||||
" source=\"MyKind\",\n",
|
||||
" page_content_fields=[\"data_field\"],\n",
|
||||
" metadata_fields=[\"metadata_field\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Customize Page Content Format\n",
|
||||
"\n",
|
||||
"When the `page_content` contains only one field the information will be the field value only. Otherwise the `page_content` will be in JSON format."
|
||||
]
|
||||
},
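{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example (a sketch; `data_field` and `extra_field` are hypothetical Entity properties):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: one content field -> page_content is the raw field value.\n",
"# loader_single = DatastoreLoader(source=\"MyKind\", page_content_fields=[\"data_field\"])\n",
"#\n",
"# Several content fields -> page_content is a JSON object of those fields.\n",
"# loader_json = DatastoreLoader(\n",
"#     source=\"MyKind\", page_content_fields=[\"data_field\", \"extra_field\"]\n",
"# )"
]
},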
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Customize Connection & Authentication"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.auth import compute_engine\n",
|
||||
"from google.cloud.firestore import Client\n",
|
||||
"\n",
|
||||
"client = Client(database=\"non-default-db\", creds=compute_engine.Credentials())\n",
|
||||
"loader = DatastoreLoader(\n",
|
||||
" source=\"foo\",\n",
|
||||
" client=client,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Before You Begin\n",
|
||||
"\n",
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Create a Datastore database](https://cloud.google.com/datastore/docs/manage-databases)\n",
|
||||
"\n",
|
||||
"After confirmed access to database in the runtime environment of this notebook, filling the following values and run the cell before running example scripts."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @markdown Please specify a source for demo purpose.\n",
|
||||
"SOURCE = \"test\" # @param {type:\"Query\"|\"CollectionGroup\"|\"DocumentReference\"|\"string\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 🦜🔗 Library Installation\n",
|
||||
"\n",
|
||||
"The integration lives in its own `langchain-google-datastore` package, so we need to install it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -upgrade --quiet langchain-google-datastore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"**Colab only**: Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
|
||||
"# import IPython\n",
|
||||
"\n",
|
||||
"# app = IPython.Application.instance()\n",
|
||||
"# app.kernel.do_shutdown(True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ☁ Set Your Google Cloud Project\n",
|
||||
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
|
||||
"\n",
|
||||
"If you don't know your project ID, try the following:\n",
|
||||
"\n",
|
||||
"* Run `gcloud config list`.\n",
|
||||
"* Run `gcloud projects list`.\n",
|
||||
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# @markdown Please fill in the value below with your Google Cloud project ID and then run the cell.\n",
|
||||
"\n",
|
||||
"PROJECT_ID = \"my-project-id\" # @param {type:\"string\"}\n",
|
||||
"\n",
|
||||
"# Set the project id\n",
|
||||
"!gcloud config set project {PROJECT_ID}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### 🔐 Authentication\n",
|
||||
"\n",
|
||||
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
|
||||
"\n",
|
||||
"- If you are using Colab to run this notebook, use the cell below and continue.\n",
|
||||
"- If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.colab import auth\n",
|
||||
"\n",
|
||||
"auth.authenticate_user()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### API Enablement\n",
|
||||
"The `langchain-google-datastore` package requires that you [enable the Datastore API](https://console.cloud.google.com/flows/enableapi?apiid=datastore.googleapis.com) in your Google Cloud Project."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# enable Datastore API\n",
|
||||
"!gcloud services enable datastore.googleapis.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Save documents\n",
|
||||
"\n",
|
||||
"`DatastoreSaver` can store Documents into Datastore. By default it will try to extract the Document reference from the metadata\n",
|
||||
"\n",
|
||||
"Save langchain documents with `DatastoreSaver.upsert_documents(<documents>)`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_google_datastore import DatastoreSaver\n",
|
||||
"\n",
|
||||
"data = [Document(page_content=\"Hello, World!\")]\n",
|
||||
"saver = DatastoreSaver()\n",
|
||||
"saver.upsert_documents(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Save documents without reference\n",
|
||||
"\n",
|
||||
"If a collection is specified the documents will be stored with an auto generated id."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"saver = DatastoreSaver(\"Collection\")\n",
|
||||
"\n",
|
||||
"saver.upsert_documents(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Save documents with other references"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc_ids = [\"AnotherCollection/doc_id\", \"foo/bar\"]\n",
|
||||
"saver = DatastoreSaver()\n",
|
||||
"\n",
|
||||
"saver.upsert_documents(documents=data, document_ids=doc_ids)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load from Collection or SubCollection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load langchain documents with `DatastoreLoader.load()` or `Datastore.lazy_load()`. `lazy_load` returns a generator that only queries database during the iteration. To initialize `DatastoreLoader` class you need to provide:\n",
|
||||
"\n",
|
||||
"1. `source` - An instance of a Query, CollectionGroup, DocumentReference or the single `\\`-delimited path to a Datastore collection`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_google_datastore import DatastoreLoader\n",
|
||||
"\n",
|
||||
"loader_collection = DatastoreLoader(\"Collection\")\n",
|
||||
"loader_subcollection = DatastoreLoader(\"Collection/doc/SubCollection\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"data_collection = loader_collection.load()\n",
|
||||
"data_subcollection = loader_subcollection.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load a single Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.cloud import datastore\n",
|
||||
"\n",
|
||||
"client = datastore.Client()\n",
|
||||
"doc_ref = client.collection(\"foo\").document(\"bar\")\n",
|
||||
"\n",
|
||||
"loader_document = DatastoreLoader(doc_ref)\n",
|
||||
"\n",
|
||||
"data = loader_document.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load from CollectionGroup or Query"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.cloud.datastore import CollectionGroup, FieldFilter, Query\n",
|
||||
"\n",
|
||||
"col_ref = client.collection(\"col_group\")\n",
|
||||
"collection_group = CollectionGroup(col_ref)\n",
|
||||
"\n",
|
||||
"loader_group = DatastoreLoader(collection_group)\n",
|
||||
"\n",
|
||||
"col_ref = client.collection(\"collection\")\n",
|
||||
"query = col_ref.where(filter=FieldFilter(\"region\", \"==\", \"west_coast\"))\n",
|
||||
"\n",
|
||||
"loader_query = DatastoreLoader(query)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Delete documents\n",
|
||||
"\n",
|
||||
"Delete a list of langchain documents from Datastore collection with `DatastoreSaver.delete_documents(<documents>)`.\n",
|
||||
"\n",
|
||||
"If document ids is provided, the Documents will be ignored."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"saver = DatastoreSaver()\n",
|
||||
"\n",
|
||||
"saver.delete_documents(data)\n",
|
||||
"\n",
|
||||
"# The Documents will be ignored and only the document ids will be used.\n",
|
||||
"saver.delete_documents(data, doc_ids)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Advanced Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load documents with customize document page content & metadata\n",
|
||||
"\n",
|
||||
"The arguments of `page_content_fields` and `metadata_fields` will specify the Datastore Document fields to be written into LangChain Document `page_content` and `metadata`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = DatastoreLoader(\n",
|
||||
" source=\"foo/bar/subcol\",\n",
|
||||
" page_content_fields=[\"data_field\"],\n",
|
||||
" metadata_fields=[\"metadata_field\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Customize Page Content Format\n",
|
||||
"\n",
|
||||
"When the `page_content` contains only one field the information will be the field value only. Otherwise the `page_content` will be in JSON format."
|
||||
]
|
||||
},
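{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example (a sketch; `data_field` and `extra_field` are hypothetical document fields):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: one content field -> page_content is the raw field value.\n",
"# loader_single = DatastoreLoader(source=\"foo\", page_content_fields=[\"data_field\"])\n",
"#\n",
"# Several content fields -> page_content is a JSON object of those fields.\n",
"# loader_json = DatastoreLoader(\n",
"#     source=\"foo\", page_content_fields=[\"data_field\", \"extra_field\"]\n",
"# )"
]
},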
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Customize Connection & Authentication"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.auth import compute_engine\n",
|
||||
"from google.cloud.datastore import Client\n",
|
||||
"\n",
|
||||
"client = Client(database=\"non-default-db\", creds=compute_engine.Credentials())\n",
|
||||
"loader = DatastoreLoader(\n",
|
||||
" source=\"foo\",\n",
|
||||
" client=client,\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -8,7 +8,9 @@
|
||||
"\n",
|
||||
"> [Firestore](https://cloud.google.com/firestore) is a serverless document-oriented database that scales to meet any demand. Extend your database application to build AI-powered experiences leveraging Firestore's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Firestore](https://cloud.google.com/firestore) to [save, load and delete langchain documents](https://python.langchain.com/docs/modules/data_connection/document_loaders/) with `FirestoreLoader` and `FirestoreSaver`.\n",
|
||||
"This notebook goes over how to use [Firestore](https://cloud.google.com/firestore) to [save, load and delete langchain documents](/docs/modules/data_connection/document_loaders/) with `FirestoreLoader` and `FirestoreSaver`.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-firestore-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-firestore-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
@@ -22,6 +24,7 @@
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Enable the Firestore API](https://console.cloud.google.com/flows/enableapi?apiid=firestore.googleapis.com)\n",
|
||||
"* [Create a Firestore database](https://cloud.google.com/firestore/docs/manage-databases)\n",
|
||||
"\n",
|
||||
"After confirmed access to database in the runtime environment of this notebook, filling the following values and run the cell before running example scripts."
|
||||
@@ -128,24 +131,6 @@
|
||||
"auth.authenticate_user()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### API Enablement\n",
|
||||
"The `langchain-google-firestore` package requires that you [enable the Firestore Admin API](https://console.cloud.google.com/flows/enableapi?apiid=firestore.googleapis.com) in your Google Cloud Project."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# enable Firestore Admin API\n",
|
||||
"!gcloud services enable firestore.googleapis.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -170,7 +155,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents.base import Document\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_google_firestore import FirestoreSaver\n",
|
||||
"\n",
|
||||
"saver = FirestoreSaver()\n",
|
||||
@@ -232,7 +217,7 @@
|
||||
"source": [
|
||||
"Load langchain documents with `FirestoreLoader.load()` or `Firestore.lazy_load()`. `lazy_load` returns a generator that only queries database during the iteration. To initialize `FirestoreLoader` class you need to provide:\n",
|
||||
"\n",
|
||||
"1. `source` - An instance of a Query, CollectionGroup, DocumentReference or the single `\\`-delimited path to a Firestore collection`."
|
||||
"1. `source` - An instance of a Query, CollectionGroup, DocumentReference or the single `\\`-delimited path to a Firestore collection."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -10,7 +10,9 @@
|
||||
"\n",
|
||||
"> [Google Memorystore for Redis](https://cloud.google.com/memorystore/docs/redis/memorystore-for-redis-overview) is a fully-managed service that is powered by the Redis in-memory data store to build application caches that provide sub-millisecond data access. Extend your database application to build AI-powered experiences leveraging Memorystore for Redis's Langchain integrations.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Memorystore for Redis](https://cloud.google.com/memorystore/docs/redis/memorystore-for-redis-overview) to [save, load and delete langchain documents](https://python.langchain.com/docs/modules/data_connection/document_loaders/) with `MemorystoreDocumentLoader` and `MemorystoreDocumentSaver`.\n",
|
||||
"This notebook goes over how to use [Memorystore for Redis](https://cloud.google.com/memorystore/docs/redis/memorystore-for-redis-overview) to [save, load and delete langchain documents](/docs/modules/data_connection/document_loaders/) with `MemorystoreDocumentLoader` and `MemorystoreDocumentSaver`.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-memorystore-redis-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-memorystore-redis-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
@@ -24,6 +26,7 @@
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Enable the Memorystore for Redis API](https://console.cloud.google.com/flows/enableapi?apiid=redis.googleapis.com)\n",
|
||||
"* [Create a Memorystore for Redis instance](https://cloud.google.com/memorystore/docs/redis/create-instance-console). Ensure that the version is greater than or equal to 5.0.\n",
|
||||
"\n",
|
||||
"After confirmed access to database in the runtime environment of this notebook, filling the following values and run the cell before running example scripts."
|
||||
@@ -159,7 +162,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import redis\n",
|
||||
"from langchain_core.documents.base import Document\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_google_memorystore_redis import MemorystoreDocumentSaver\n",
|
||||
"\n",
|
||||
"test_docs = [\n",
|
||||
|
||||
@@ -6,9 +6,11 @@
|
||||
"source": [
|
||||
"# Google Spanner\n",
|
||||
"\n",
|
||||
"> [Spanner](https://cloud.google.com/spanner) is a highly scalable database that combines unlimited scalability with relational semantics, such as secondary indexes, strong consistency, schemas, and SQL providing 99.999% availability in one easy solution. Extend your database application to build AI-powered experiences leveraging Spanner's Langchain integrations.\n",
|
||||
"> [Spanner](https://cloud.google.com/spanner) is a highly scalable database that combines unlimited scalability with relational semantics, such as secondary indexes, strong consistency, schemas, and SQL providing 99.999% availability in one easy solution.\n",
|
||||
"\n",
|
||||
"This notebook goes over how to use [Spanner](https://cloud.google.com/spanner) to [save, load and delete langchain documents](https://python.langchain.com/docs/modules/data_connection/document_loaders/) with `SpannerLoader` and `SpannerDocumentSaver`.\n",
|
||||
"This notebook goes over how to use [Spanner](https://cloud.google.com/spanner) to [save, load and delete langchain documents](/docs/modules/data_connection/document_loaders/) with `SpannerLoader` and `SpannerDocumentSaver`.\n",
|
||||
"\n",
|
||||
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-spanner-python/).\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/googleapis/langchain-google-spanner-python/blob/main/docs/document_loader.ipynb)"
|
||||
]
|
||||
@@ -22,6 +24,7 @@
|
||||
"To run this notebook, you will need to do the following:\n",
|
||||
"\n",
|
||||
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
|
||||
"* [Enable the Cloud Spanner API](https://console.cloud.google.com/flows/enableapi?apiid=spanner.googleapis.com)\n",
|
||||
"* [Create a Spanner instance](https://cloud.google.com/spanner/docs/create-manage-instances)\n",
|
||||
"* [Create a Spanner database](https://cloud.google.com/spanner/docs/create-manage-databases)\n",
|
||||
"* [Create a Spanner table](https://cloud.google.com/spanner/docs/create-query-database-console#create-schema)\n",
|
||||
@@ -58,7 +61,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install -upgrade --quiet langchain-google-spanner"
|
||||
"%pip install -upgrade --quiet langchain-google-spanner langchain"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -256,6 +259,34 @@
|
||||
"## Advanced Usage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Custom client\n",
|
||||
"\n",
|
||||
"The client created by default is the default client. To pass in `credentials` and `project` explicitly, a custom client can be passed to the constructor."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.cloud import spanner\n",
|
||||
"from google.oauth2 import service_account\n",
|
||||
"\n",
|
||||
"creds = service_account.Credentials.from_service_account_file(\"/path/to/key.json\")\n",
|
||||
"custom_client = spanner.Client(project=\"my-project\", credentials=creds)\n",
|
||||
"loader = SpannerLoader(\n",
|
||||
" INSTANCE_ID,\n",
|
||||
" DATABASE_ID,\n",
|
||||
" query,\n",
|
||||
" client=custom_client,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
@@ -412,9 +443,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from google.cloud import spanner\n",
|
||||
"from google.oauth2 import service_account\n",
|
||||
"\n",
|
||||
"creds = service_account.Credentials.from_service_account_file(\"/path/to/key.json\")\n",
|
||||
"custom_client = spanner.Client(project=\"my-project\", credentials=creds)\n",
|
||||
"saver = SpannerDocumentSaver(\n",
|
||||
" INSTANCE_ID,\n",
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"---\n",
|
||||
"The best approach is to install Grobid via docker, see https://grobid.readthedocs.io/en/latest/Grobid-docker/. \n",
|
||||
"\n",
|
||||
"(Note: additional instructions can be found [here](https://python.langchain.com/docs/docs/integrations/providers/grobid.mdx).)\n",
|
||||
"(Note: additional instructions can be found [here](/docs/integrations/providers/grobid).)\n",
|
||||
"\n",
|
||||
"Once grobid is up-and-running you can interact as described below. \n"
|
||||
]
|
||||
|
||||
@@ -42,6 +42,7 @@
|
||||
"* MongoDB database name\n",
|
||||
"* MongoDB collection name\n",
|
||||
"* (Optional) Content Filter dictionary\n",
|
||||
"* (Optional) List of field names to include in the output\n",
|
||||
"\n",
|
||||
"The output takes the following format:\n",
|
||||
"\n",
|
||||
@@ -59,7 +60,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -71,7 +72,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -80,7 +81,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -89,21 +90,22 @@
|
||||
" db_name=\"sample_restaurants\",\n",
|
||||
" collection_name=\"restaurants\",\n",
|
||||
" filter_criteria={\"borough\": \"Bronx\", \"cuisine\": \"Bakery\"},\n",
|
||||
" field_names=[\"name\", \"address\"],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"25359"
|
||||
"71"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -116,16 +118,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content=\"{'_id': ObjectId('5eb3d668b31de5d588f4292a'), 'address': {'building': '2780', 'coord': [-73.98241999999999, 40.579505], 'street': 'Stillwell Avenue', 'zipcode': '11224'}, 'borough': 'Brooklyn', 'cuisine': 'American', 'grades': [{'date': datetime.datetime(2014, 6, 10, 0, 0), 'grade': 'A', 'score': 5}, {'date': datetime.datetime(2013, 6, 5, 0, 0), 'grade': 'A', 'score': 7}, {'date': datetime.datetime(2012, 4, 13, 0, 0), 'grade': 'A', 'score': 12}, {'date': datetime.datetime(2011, 10, 12, 0, 0), 'grade': 'A', 'score': 12}], 'name': 'Riviera Caterer', 'restaurant_id': '40356018'}\", metadata={'database': 'sample_restaurants', 'collection': 'restaurants'})"
|
||||
"Document(page_content=\"Morris Park Bake Shop {'building': '1007', 'coord': [-73.856077, 40.848447], 'street': 'Morris Park Ave', 'zipcode': '10462'}\", metadata={'database': 'sample_restaurants', 'collection': 'restaurants'})"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"source": [
|
||||
"# TiDB\n",
|
||||
"\n",
|
||||
"> [TiDB](https://github.com/pingcap/tidb) is an open-source, cloud-native, distributed, MySQL-Compatible database for elastic scale and real-time analytics.\n",
|
||||
 [TiDB Cloud](https://tidbcloud.com/),">
"> [TiDB Cloud](https://tidbcloud.com/) is a comprehensive Database-as-a-Service (DBaaS) solution that provides dedicated and serverless options. TiDB Serverless is now integrating a built-in vector search into the MySQL landscape. With this enhancement, you can seamlessly develop AI applications using TiDB Serverless without the need for a new database or additional technical stacks. Be among the first to experience it by joining the waitlist for the private beta at https://tidb.cloud/ai.\n",
|
||||
"\n",
|
||||
"This notebook introduces how to use `TiDBLoader` to load data from TiDB in langchain."
|
||||
]
|
||||
|
||||
@@ -39,9 +39,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = ToMarkdownLoader(\n",
|
||||
" url=\"https://python.langchain.com/docs/get_started/introduction\", api_key=api_key\n",
|
||||
")"
|
||||
"loader = ToMarkdownLoader(url=\"/docs/get_started/introduction\", api_key=api_key)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -72,9 +70,9 @@
|
||||
"This framework consists of several parts.\n",
|
||||
"\n",
|
||||
"- **LangChain Libraries**: The Python and JavaScript libraries. Contains interfaces and integrations for a myriad of components, a basic run time for combining these components into chains and agents, and off-the-shelf implementations of chains and agents.\n",
|
||||
"- **[LangChain Templates](https://python.langchain.com/docs/templates)**: A collection of easily deployable reference architectures for a wide variety of tasks.\n",
|
||||
"- **[LangServe](https://python.langchain.com/docs/langserve)**: A library for deploying LangChain chains as a REST API.\n",
|
||||
"- **[LangSmith](https://python.langchain.com/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.\n",
|
||||
"- **[LangChain Templates](/docs/templates)**: A collection of easily deployable reference architectures for a wide variety of tasks.\n",
|
||||
"- **[LangServe](/docs/langserve)**: A library for deploying LangChain chains as a REST API.\n",
|
||||
"- **[LangSmith](/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -101,11 +99,11 @@
|
||||
"\n",
|
||||
"## Get started [](\\#get-started \"Direct link to Get started\")\n",
|
||||
"\n",
|
||||
"[Here’s](https://python.langchain.com/docs/get_started/installation) how to install LangChain, set up your environment, and start building.\n",
|
||||
"[Here’s](/docs/get_started/installation) how to install LangChain, set up your environment, and start building.\n",
|
||||
"\n",
|
||||
"We recommend following our [Quickstart](https://python.langchain.com/docs/get_started/quickstart) guide to familiarize yourself with the framework by building your first LangChain application.\n",
|
||||
"We recommend following our [Quickstart](/docs/get_started/quickstart) guide to familiarize yourself with the framework by building your first LangChain application.\n",
|
||||
"\n",
|
||||
"Read up on our [Security](https://python.langchain.com/docs/security) best practices to make sure you're developing safely with LangChain.\n",
|
||||
"Read up on our [Security](/docs/security) best practices to make sure you're developing safely with LangChain.\n",
|
||||
"\n",
|
||||
"note\n",
|
||||
"\n",
|
||||
@@ -115,43 +113,43 @@
|
||||
"\n",
|
||||
"LCEL is a declarative way to compose chains. LCEL was designed from day 1 to support putting prototypes in production, with no code changes, from the simplest “prompt + LLM” chain to the most complex chains.\n",
|
||||
"\n",
|
||||
"- **[Overview](https://python.langchain.com/docs/expression_language/)**: LCEL and its benefits\n",
|
||||
"- **[Interface](https://python.langchain.com/docs/expression_language/interface)**: The standard interface for LCEL objects\n",
|
||||
"- **[How-to](https://python.langchain.com/docs/expression_language/how_to)**: Key features of LCEL\n",
|
||||
"- **[Cookbook](https://python.langchain.com/docs/expression_language/cookbook)**: Example code for accomplishing common tasks\n",
|
||||
"- **[Overview](/docs/expression_language/)**: LCEL and its benefits\n",
|
||||
"- **[Interface](/docs/expression_language/interface)**: The standard interface for LCEL objects\n",
|
||||
"- **[How-to](/docs/expression_language/how_to)**: Key features of LCEL\n",
|
||||
"- **[Cookbook](/docs/expression_language/cookbook)**: Example code for accomplishing common tasks\n",
|
||||
"\n",
|
||||
"## Modules [](\\#modules \"Direct link to Modules\")\n",
|
||||
"\n",
|
||||
"LangChain provides standard, extendable interfaces and integrations for the following modules:\n",
|
||||
"\n",
|
||||
"#### [Model I/O](https://python.langchain.com/docs/modules/model_io/) [](\\#model-io \"Direct link to model-io\")\n",
|
||||
"#### [Model I/O](/docs/modules/model_io/) [](\\#model-io \"Direct link to model-io\")\n",
|
||||
"\n",
|
||||
"Interface with language models\n",
|
||||
"\n",
|
||||
"#### [Retrieval](https://python.langchain.com/docs/modules/data_connection/) [](\\#retrieval \"Direct link to retrieval\")\n",
|
||||
"#### [Retrieval](/docs/modules/data_connection/) [](\\#retrieval \"Direct link to retrieval\")\n",
|
||||
"\n",
|
||||
"Interface with application-specific data\n",
|
||||
"\n",
|
||||
"#### [Agents](https://python.langchain.com/docs/modules/agents/) [](\\#agents \"Direct link to agents\")\n",
|
||||
"#### [Agents](/docs/modules/agents/) [](\\#agents \"Direct link to agents\")\n",
|
||||
"\n",
|
||||
"Let models choose which tools to use given high-level directives\n",
|
||||
"\n",
|
||||
"## Examples, ecosystem, and resources [](\\#examples-ecosystem-and-resources \"Direct link to Examples, ecosystem, and resources\")\n",
|
||||
"\n",
|
||||
"### [Use cases](https://python.langchain.com/docs/use_cases/question_answering/) [](\\#use-cases \"Direct link to use-cases\")\n",
|
||||
"### [Use cases](/docs/use_cases/question_answering/) [](\\#use-cases \"Direct link to use-cases\")\n",
|
||||
"\n",
|
||||
"Walkthroughs and techniques for common end-to-end use cases, like:\n",
|
||||
"\n",
|
||||
"- [Document question answering](https://python.langchain.com/docs/use_cases/question_answering/)\n",
|
||||
"- [Chatbots](https://python.langchain.com/docs/use_cases/chatbots/)\n",
|
||||
"- [Analyzing structured data](https://python.langchain.com/docs/use_cases/sql/)\n",
|
||||
"- [Document question answering](/docs/use_cases/question_answering/)\n",
|
||||
"- [Chatbots](/docs/use_cases/chatbots/)\n",
|
||||
"- [Analyzing structured data](/docs/use_cases/sql/)\n",
|
||||
"- and much more...\n",
|
||||
"\n",
|
||||
"### [Integrations](https://python.langchain.com/docs/integrations/providers/) [](\\#integrations \"Direct link to integrations\")\n",
|
||||
"### [Integrations](/docs/integrations/providers/) [](\\#integrations \"Direct link to integrations\")\n",
|
||||
"\n",
|
||||
"LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](https://python.langchain.com/docs/integrations/providers/).\n",
|
||||
"LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/docs/integrations/providers/).\n",
|
||||
"\n",
|
||||
"### [Guides](https://python.langchain.com/docs/guides/debugging) [](\\#guides \"Direct link to guides\")\n",
|
||||
"### [Guides](/docs/guides/debugging) [](\\#guides \"Direct link to guides\")\n",
|
||||
"\n",
|
||||
"Best practices for developing with LangChain.\n",
|
||||
"\n",
|
||||
@@ -159,11 +157,11 @@
|
||||
"\n",
|
||||
"Head to the reference section for full documentation of all classes and methods in the LangChain and LangChain Experimental Python packages.\n",
|
||||
"\n",
|
||||
"### [Developer's guide](https://python.langchain.com/docs/contributing) [](\\#developers-guide \"Direct link to developers-guide\")\n",
|
||||
"### [Developer's guide](/docs/contributing) [](\\#developers-guide \"Direct link to developers-guide\")\n",
|
||||
"\n",
|
||||
"Check out the developer's guide for guidelines on contributing and help getting your dev environment set up.\n",
|
||||
"\n",
|
||||
"Head to the [Community navigator](https://python.langchain.com/docs/community) to find places to ask questions, share feedback, meet other developers, and dream about the future of LLM’s.\n"
|
||||
"Head to the [Community navigator](/docs/community) to find places to ask questions, share feedback, meet other developers, and dream about the future of LLM’s.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -0,0 +1,466 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b9bba344bbe0b4bd",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"# AI21SemanticTextSplitter\n",
|
||||
"\n",
|
||||
"This example goes over how to use AI21SemanticTextSplitter in LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d8e4cdb63fbc34ec",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"## Installation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b09bb1cd2c7e036a",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install langchain-ai21"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ba1d80fe8d82be89",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"## Environment Setup\n",
|
||||
"\n",
|
||||
"We'll need to get a AI21 API key and set the AI21_API_KEY environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "844b8f744d22bcb6",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"os.environ[\"AI21_API_KEY\"] = getpass()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3e670b278e6b2b9e",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"## Example Usages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f61c5c981f01ad31",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Splitting text by semantic meaning"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e7da988112712cf3",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"This example shows how to use AI21SemanticTextSplitter to split a text into chunks based on semantic meaning."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1d82b65c9b8684f3",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21SemanticTextSplitter\n",
|
||||
"\n",
|
||||
"TEXT = (\n",
|
||||
" \"We’ve all experienced reading long, tedious, and boring pieces of text - financial reports, \"\n",
|
||||
" \"legal documents, or terms and conditions (though, who actually reads those terms and conditions to be honest?).\\n\"\n",
|
||||
" \"Imagine a company that employs hundreds of thousands of employees. In today's information \"\n",
|
||||
" \"overload age, nearly 30% of the workday is spent dealing with documents. There's no surprise \"\n",
|
||||
" \"here, given that some of these documents are long and convoluted on purpose (did you know that \"\n",
|
||||
" \"reading through all your privacy policies would take almost a quarter of a year?). Aside from \"\n",
|
||||
" \"inefficiency, workers may simply refrain from reading some documents (for example, Only 16% of \"\n",
|
||||
" \"Employees Read Their Employment Contracts Entirely Before Signing!).\\nThis is where AI-driven summarization \"\n",
|
||||
" \"tools can be helpful: instead of reading entire documents, which is tedious and time-consuming, \"\n",
|
||||
" \"users can (ideally) quickly extract relevant information from a text. With large language models, \"\n",
|
||||
" \"the development of those tools is easier than ever, and you can offer your users a summary that is \"\n",
|
||||
" \"specifically tailored to their preferences.\\nLarge language models naturally follow patterns in input \"\n",
|
||||
" \"(prompt), and provide coherent completion that follows the same patterns. For that, we want to feed \"\n",
|
||||
" 'them with several examples in the input (\"few-shot prompt\"), so they can follow through. '\n",
|
||||
" \"The process of creating the correct prompt for your problem is called prompt engineering, \"\n",
|
||||
" \"and you can read more about it here.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"semantic_text_splitter = AI21SemanticTextSplitter()\n",
|
||||
"chunks = semantic_text_splitter.split_text(TEXT)\n",
|
||||
"\n",
|
||||
"print(f\"The text has been split into {len(chunks)} chunks.\")\n",
|
||||
"for chunk in chunks:\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"====\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e8d1fcf818a8a81",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Splitting text by semantic meaning with merge"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c307abbc216fe89f",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"This example shows how to use AI21SemanticTextSplitter to split a text into chunks based on semantic meaning, then merging the chunks based on `chunk_size`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5651c581fcc1ff02",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21SemanticTextSplitter\n",
|
||||
"\n",
|
||||
"TEXT = (\n",
|
||||
" \"We’ve all experienced reading long, tedious, and boring pieces of text - financial reports, \"\n",
|
||||
" \"legal documents, or terms and conditions (though, who actually reads those terms and conditions to be honest?).\\n\"\n",
|
||||
" \"Imagine a company that employs hundreds of thousands of employees. In today's information \"\n",
|
||||
" \"overload age, nearly 30% of the workday is spent dealing with documents. There's no surprise \"\n",
|
||||
" \"here, given that some of these documents are long and convoluted on purpose (did you know that \"\n",
|
||||
" \"reading through all your privacy policies would take almost a quarter of a year?). Aside from \"\n",
|
||||
" \"inefficiency, workers may simply refrain from reading some documents (for example, Only 16% of \"\n",
|
||||
" \"Employees Read Their Employment Contracts Entirely Before Signing!).\\nThis is where AI-driven summarization \"\n",
|
||||
" \"tools can be helpful: instead of reading entire documents, which is tedious and time-consuming, \"\n",
|
||||
" \"users can (ideally) quickly extract relevant information from a text. With large language models, \"\n",
|
||||
" \"the development of those tools is easier than ever, and you can offer your users a summary that is \"\n",
|
||||
" \"specifically tailored to their preferences.\\nLarge language models naturally follow patterns in input \"\n",
|
||||
" \"(prompt), and provide coherent completion that follows the same patterns. For that, we want to feed \"\n",
|
||||
" 'them with several examples in the input (\"few-shot prompt\"), so they can follow through. '\n",
|
||||
" \"The process of creating the correct prompt for your problem is called prompt engineering, \"\n",
|
||||
" \"and you can read more about it here.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"semantic_text_splitter_chunks = AI21SemanticTextSplitter(chunk_size=1000)\n",
|
||||
"chunks = semantic_text_splitter_chunks.split_text(TEXT)\n",
|
||||
"\n",
|
||||
"print(f\"The text has been split into {len(chunks)} chunks.\")\n",
|
||||
"for chunk in chunks:\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"====\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b464db855e547cbb",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Splitting text to documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4410e8467012b193",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"This example shows how to use AI21SemanticTextSplitter to split a text into Documents based on semantic meaning. The metadata will contain a type for each document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3cf131d9be910115",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21SemanticTextSplitter\n",
|
||||
"\n",
|
||||
"TEXT = (\n",
|
||||
" \"We’ve all experienced reading long, tedious, and boring pieces of text - financial reports, \"\n",
|
||||
" \"legal documents, or terms and conditions (though, who actually reads those terms and conditions to be honest?).\\n\"\n",
|
||||
" \"Imagine a company that employs hundreds of thousands of employees. In today's information \"\n",
|
||||
" \"overload age, nearly 30% of the workday is spent dealing with documents. There's no surprise \"\n",
|
||||
" \"here, given that some of these documents are long and convoluted on purpose (did you know that \"\n",
|
||||
" \"reading through all your privacy policies would take almost a quarter of a year?). Aside from \"\n",
|
||||
" \"inefficiency, workers may simply refrain from reading some documents (for example, Only 16% of \"\n",
|
||||
" \"Employees Read Their Employment Contracts Entirely Before Signing!).\\nThis is where AI-driven summarization \"\n",
|
||||
" \"tools can be helpful: instead of reading entire documents, which is tedious and time-consuming, \"\n",
|
||||
" \"users can (ideally) quickly extract relevant information from a text. With large language models, \"\n",
|
||||
" \"the development of those tools is easier than ever, and you can offer your users a summary that is \"\n",
|
||||
" \"specifically tailored to their preferences.\\nLarge language models naturally follow patterns in input \"\n",
|
||||
" \"(prompt), and provide coherent completion that follows the same patterns. For that, we want to feed \"\n",
|
||||
" 'them with several examples in the input (\"few-shot prompt\"), so they can follow through. '\n",
|
||||
" \"The process of creating the correct prompt for your problem is called prompt engineering, \"\n",
|
||||
" \"and you can read more about it here.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"semantic_text_splitter = AI21SemanticTextSplitter()\n",
|
||||
"documents = semantic_text_splitter.split_text_to_documents(TEXT)\n",
|
||||
"\n",
|
||||
"print(f\"The text has been split into {len(documents)} Documents.\")\n",
|
||||
"for doc in documents:\n",
|
||||
" print(f\"type: {doc.metadata['source_type']}\")\n",
|
||||
" print(f\"text: {doc.page_content}\")\n",
|
||||
" print(\"====\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b544ba21335d01a6",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Creating Documents with Metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c67f8c3ad89b8ad2",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"This example shows how to use AI21SemanticTextSplitter to create Documents from texts, and adding custom Metadata to each Document."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fe222d0e85249bda",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21SemanticTextSplitter\n",
|
||||
"\n",
|
||||
"TEXT = (\n",
|
||||
" \"We’ve all experienced reading long, tedious, and boring pieces of text - financial reports, \"\n",
|
||||
" \"legal documents, or terms and conditions (though, who actually reads those terms and conditions to be honest?).\\n\"\n",
|
||||
" \"Imagine a company that employs hundreds of thousands of employees. In today's information \"\n",
|
||||
" \"overload age, nearly 30% of the workday is spent dealing with documents. There's no surprise \"\n",
|
||||
" \"here, given that some of these documents are long and convoluted on purpose (did you know that \"\n",
|
||||
" \"reading through all your privacy policies would take almost a quarter of a year?). Aside from \"\n",
|
||||
" \"inefficiency, workers may simply refrain from reading some documents (for example, Only 16% of \"\n",
|
||||
" \"Employees Read Their Employment Contracts Entirely Before Signing!).\\nThis is where AI-driven summarization \"\n",
|
||||
" \"tools can be helpful: instead of reading entire documents, which is tedious and time-consuming, \"\n",
|
||||
" \"users can (ideally) quickly extract relevant information from a text. With large language models, \"\n",
|
||||
" \"the development of those tools is easier than ever, and you can offer your users a summary that is \"\n",
|
||||
" \"specifically tailored to their preferences.\\nLarge language models naturally follow patterns in input \"\n",
|
||||
" \"(prompt), and provide coherent completion that follows the same patterns. For that, we want to feed \"\n",
|
||||
" 'them with several examples in the input (\"few-shot prompt\"), so they can follow through. '\n",
|
||||
" \"The process of creating the correct prompt for your problem is called prompt engineering, \"\n",
|
||||
" \"and you can read more about it here.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"semantic_text_splitter = AI21SemanticTextSplitter()\n",
|
||||
"texts = [TEXT]\n",
|
||||
"documents = semantic_text_splitter.create_documents(\n",
|
||||
" texts=texts, metadatas=[{\"pikachu\": \"pika pika\"}]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(f\"The text has been split into {len(documents)} Documents.\")\n",
|
||||
"for doc in documents:\n",
|
||||
" print(f\"metadata: {doc.metadata}\")\n",
|
||||
" print(f\"text: {doc.page_content}\")\n",
|
||||
" print(\"====\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f8b5682c34142319",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Splitting text to documents with start index"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "359ea797c03ece85",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"This example shows how to use AI21SemanticTextSplitter to split a text into Documents based on semantic meaning. The metadata will contain a start index for each document.\n",
|
||||
"**Note** that the start index provides an indication of the order of the chunks rather than the actual start index for each chunk."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2dc39002f0c25784",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21SemanticTextSplitter\n",
|
||||
"\n",
|
||||
"TEXT = (\n",
|
||||
" \"We’ve all experienced reading long, tedious, and boring pieces of text - financial reports, \"\n",
|
||||
" \"legal documents, or terms and conditions (though, who actually reads those terms and conditions to be honest?).\\n\"\n",
|
||||
" \"Imagine a company that employs hundreds of thousands of employees. In today's information \"\n",
|
||||
" \"overload age, nearly 30% of the workday is spent dealing with documents. There's no surprise \"\n",
|
||||
" \"here, given that some of these documents are long and convoluted on purpose (did you know that \"\n",
|
||||
" \"reading through all your privacy policies would take almost a quarter of a year?). Aside from \"\n",
|
||||
" \"inefficiency, workers may simply refrain from reading some documents (for example, Only 16% of \"\n",
|
||||
" \"Employees Read Their Employment Contracts Entirely Before Signing!).\\nThis is where AI-driven summarization \"\n",
|
||||
" \"tools can be helpful: instead of reading entire documents, which is tedious and time-consuming, \"\n",
|
||||
" \"users can (ideally) quickly extract relevant information from a text. With large language models, \"\n",
|
||||
" \"the development of those tools is easier than ever, and you can offer your users a summary that is \"\n",
|
||||
" \"specifically tailored to their preferences.\\nLarge language models naturally follow patterns in input \"\n",
|
||||
" \"(prompt), and provide coherent completion that follows the same patterns. For that, we want to feed \"\n",
|
||||
" 'them with several examples in the input (\"few-shot prompt\"), so they can follow through. '\n",
|
||||
" \"The process of creating the correct prompt for your problem is called prompt engineering, \"\n",
|
||||
" \"and you can read more about it here.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"semantic_text_splitter = AI21SemanticTextSplitter(add_start_index=True)\n",
|
||||
"documents = semantic_text_splitter.create_documents(texts=[TEXT])\n",
|
||||
"print(f\"The text has been split into {len(documents)} Documents.\")\n",
|
||||
"for doc in documents:\n",
|
||||
" print(f\"start_index: {doc.metadata['start_index']}\")\n",
|
||||
" print(f\"text: {doc.page_content}\")\n",
|
||||
" print(\"====\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b62939cc5803b9fb",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"### Splitting documents"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "44162d340c0de5fb",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"This example shows how to use AI21SemanticTextSplitter to split a list of Documents into chunks based on semantic meaning."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8950c8e4e1208bf6",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_ai21 import AI21SemanticTextSplitter\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"\n",
|
||||
"TEXT = (\n",
|
||||
" \"We’ve all experienced reading long, tedious, and boring pieces of text - financial reports, \"\n",
|
||||
" \"legal documents, or terms and conditions (though, who actually reads those terms and conditions to be honest?).\\n\"\n",
|
||||
" \"Imagine a company that employs hundreds of thousands of employees. In today's information \"\n",
|
||||
" \"overload age, nearly 30% of the workday is spent dealing with documents. There's no surprise \"\n",
|
||||
" \"here, given that some of these documents are long and convoluted on purpose (did you know that \"\n",
|
||||
" \"reading through all your privacy policies would take almost a quarter of a year?). Aside from \"\n",
|
||||
" \"inefficiency, workers may simply refrain from reading some documents (for example, Only 16% of \"\n",
|
||||
" \"Employees Read Their Employment Contracts Entirely Before Signing!).\\nThis is where AI-driven summarization \"\n",
|
||||
" \"tools can be helpful: instead of reading entire documents, which is tedious and time-consuming, \"\n",
|
||||
" \"users can (ideally) quickly extract relevant information from a text. With large language models, \"\n",
|
||||
" \"the development of those tools is easier than ever, and you can offer your users a summary that is \"\n",
|
||||
" \"specifically tailored to their preferences.\\nLarge language models naturally follow patterns in input \"\n",
|
||||
" \"(prompt), and provide coherent completion that follows the same patterns. For that, we want to feed \"\n",
|
||||
" 'them with several examples in the input (\"few-shot prompt\"), so they can follow through. '\n",
|
||||
" \"The process of creating the correct prompt for your problem is called prompt engineering, \"\n",
|
||||
" \"and you can read more about it here.\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"semantic_text_splitter = AI21SemanticTextSplitter()\n",
|
||||
"document = Document(page_content=TEXT, metadata={\"hello\": \"goodbye\"})\n",
|
||||
"documents = semantic_text_splitter.split_documents([document])\n",
|
||||
"print(f\"The document list has been split into {len(documents)} Documents.\")\n",
|
||||
"for doc in documents:\n",
|
||||
" print(f\"text: {doc.page_content}\")\n",
|
||||
" print(f\"metadata: {doc.metadata}\")\n",
|
||||
" print(\"====\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f8f911b8d9ec22e5",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -43,7 +43,7 @@
|
||||
"source": [
|
||||
"## Environment Setup\n",
|
||||
"\n",
|
||||
"We'll need to get a [Anthropic](https://console.anthropic.com/settings/keys) and set the `ANTHROPIC_API_KEY` environment variable:"
|
||||
"We'll need to get an [Anthropic](https://console.anthropic.com/settings/keys) API key and set the `ANTHROPIC_API_KEY` environment variable:"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Baseten\n",
|
||||
"\n",
|
||||
"[Baseten](https://baseten.co) is a [Provider](https://python.langchain.com/docs/integrations/providers/baseten) in the LangChain ecosystem that implements the LLMs component.\n",
|
||||
"[Baseten](https://baseten.co) is a [Provider](/docs/integrations/providers/baseten) in the LangChain ecosystem that implements the LLMs component.\n",
|
||||
"\n",
|
||||
"This example demonstrates using an LLM — Mistral 7B hosted on Baseten — with LangChain."
|
||||
]
|
||||
@@ -83,7 +83,7 @@
|
||||
"\n",
|
||||
"We can chain together multiple calls to one or multiple models, which is the whole point of Langchain!\n",
|
||||
"\n",
|
||||
"For example, we can replace GPT with Mistral in this [demo of terminal emulation](https://python.langchain.com/docs/modules/agents/how_to/chatgpt_clone)."
|
||||
"For example, we can replace GPT with Mistral in this demo of terminal emulation."
|
||||
]
|
||||
},
|
||||
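For context, a hedged sketch of such a chain with the Baseten-hosted model (the model id is a placeholder you would replace with your own deployment; `Baseten` follows the langchain-community integration):

```python
from langchain_community.llms import Baseten
from langchain_core.prompts import PromptTemplate

# Placeholder model id; use the id of your own Baseten deployment
mistral = Baseten(model="MODEL_ID")

prompt = PromptTemplate.from_template("What is the capital of {country}?")
chain = prompt | mistral
print(chain.invoke({"country": "France"}))
```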
{
|
||||
@@ -167,7 +167,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -181,10 +181,9 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
},
|
||||
"orig_nbformat": 4
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
"The integration lives in the `langchain-community` package. We also need to install the `cohere` package itself. We can install these with:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install -U langchain-community cohere\n",
|
||||
"pip install -U langchain-community langchain-cohere\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"We'll also need to get a [Cohere API key](https://cohere.com/) and set the `COHERE_API_KEY` environment variable:"
|
||||
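The cell that follows this hunk is elided except for its output; it typically looks like this sketch (the prompt string is illustrative):

```python
import getpass
import os

# Expose the Cohere API key to the integration via the environment
if "COHERE_API_KEY" not in os.environ:
    os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API key: ")
```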
@@ -39,7 +39,7 @@
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdin",
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" ········\n"
|
||||
@@ -91,7 +91,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import Cohere\n",
|
||||
"from langchain_cohere import Cohere\n",
|
||||
"from langchain_core.messages import HumanMessage"
|
||||
]
|
||||
},
|
||||
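With the updated import in place, a minimal usage sketch (the prompt and parameters are assumptions, not part of this diff):

```python
from langchain_cohere import Cohere

# Reads COHERE_API_KEY from the environment
llm = Cohere(max_tokens=256, temperature=0.75)
print(llm.invoke("Come up with a pet name"))
```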
@@ -255,7 +255,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -54,6 +54,8 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_fireworks import Fireworks\n",
|
||||
"\n",
|
||||
"if \"FIREWORKS_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Fireworks API Key:\")\n",
|
||||
"\n",
|
||||
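As a quick sanity check once the key is set, a hedged sketch using the same model id that appears later in this notebook (the prompt is illustrative):

```python
from langchain_fireworks import Fireworks

# Initialize the Fireworks LLM; FIREWORKS_API_KEY is read from the environment
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
print(llm.invoke("Name three famous bridges."))
```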
@@ -181,7 +183,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.llms.fireworks import Fireworks\n",
|
||||
"from langchain_fireworks import Fireworks\n",
|
||||
"\n",
|
||||
"llm = Fireworks(\n",
|
||||
" model=\"accounts/fireworks/models/mixtral-8x7b-instruct\",\n",
|
||||
@@ -249,7 +251,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.9.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
277
docs/docs/integrations/llms/friendli.ipynb
Normal file
277
docs/docs/integrations/llms/friendli.ipynb
Normal file
@@ -0,0 +1,277 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "raw",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"sidebar_label: Friendli\n",
|
||||
"---"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Friendli\n",
|
||||
"\n",
|
||||
"> [Friendli](https://friendli.ai/) enhances AI application performance and optimizes cost savings with scalable, efficient deployment options, tailored for high-demand AI workloads.\n",
|
||||
"\n",
|
||||
"This tutorial guides you through integrating `Friendli` with LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"Ensure the `langchain_community` and `friendli-client` are installed.\n",
|
||||
"\n",
|
||||
"```sh\n",
|
||||
"pip install -U langchain-comminity friendli-client.\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Sign in to [Friendli Suite](https://suite.friendli.ai/) to create a Personal Access Token, and set it as the `FRIENDLI_TOKEN` environment."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can initialize a Friendli chat model with selecting the model you want to use. The default model is `mixtral-8x7b-instruct-v0-1`. You can check the available models at [docs.friendli.ai](https://docs.periflow.ai/guides/serverless_endpoints/pricing#text-generation-models)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms.friendli import Friendli\n",
|
||||
"\n",
|
||||
"llm = Friendli(model=\"mixtral-8x7b-instruct-v0-1\", max_tokens=100, temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"`Frienli` supports all methods of [`LLM`](/docs/modules/model_io/llms/) including async APIs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can use functionality of `invoke`, `batch`, `generate`, and `stream`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.invoke(\"Tell me a joke.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"',\n",
|
||||
" 'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"']"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.batch([\"Tell me a joke.\", \"Tell me a joke.\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"LLMResult(generations=[[Generation(text='Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"')], [Generation(text='Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"')]], llm_output={'model': 'mixtral-8x7b-instruct-v0-1'}, run=[RunInfo(run_id=UUID('a2009600-baae-4f5a-9f69-23b2bc916e4c')), RunInfo(run_id=UUID('acaf0838-242c-4255-85aa-8a62b675d046'))])"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm.generate([\"Tell me a joke.\", \"Tell me a joke.\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Username checks out.\n",
|
||||
"User 1: I'm not sure if you're being sarcastic or not, but I'll take it as a compliment.\n",
|
||||
"User 0: I'm not being sarcastic. I'm just saying that your username is very fitting.\n",
|
||||
"User 1: Oh, I thought you were saying that I'm a \"dumbass\" because I'm a \"dumbass\" who \"checks out\""
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in llm.stream(\"Tell me a joke.\"):\n",
|
||||
" print(chunk, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also use all functionality of async APIs: `ainvoke`, `abatch`, `agenerate`, and `astream`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await llm.ainvoke(\"Tell me a joke.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"',\n",
|
||||
" 'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"']"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await llm.abatch([\"Tell me a joke.\", \"Tell me a joke.\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"LLMResult(generations=[[Generation(text=\"Username checks out.\\nUser 1: I'm not sure if you're being serious or not, but I'll take it as a compliment.\\nUser 0: I'm being serious. I'm not sure if you're being serious or not.\\nUser 1: I'm being serious. I'm not sure if you're being serious or not.\\nUser 0: I'm being serious. I'm not sure\")], [Generation(text=\"Username checks out.\\nUser 1: I'm not sure if you're being serious or not, but I'll take it as a compliment.\\nUser 0: I'm being serious. I'm not sure if you're being serious or not.\\nUser 1: I'm being serious. I'm not sure if you're being serious or not.\\nUser 0: I'm being serious. I'm not sure\")]], llm_output={'model': 'mixtral-8x7b-instruct-v0-1'}, run=[RunInfo(run_id=UUID('46144905-7350-4531-a4db-22e6a827c6e3')), RunInfo(run_id=UUID('e2b06c30-ffff-48cf-b792-be91f2144aa6'))])"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await llm.agenerate([\"Tell me a joke.\", \"Tell me a joke.\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Username checks out.\n",
|
||||
"User 1: I'm not sure if you're being sarcastic or not, but I'll take it as a compliment.\n",
|
||||
"User 0: I'm not being sarcastic. I'm just saying that your username is very fitting.\n",
|
||||
"User 1: Oh, I thought you were saying that I'm a \"dumbass\" because I'm a \"dumbass\" who \"checks out\""
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"async for chunk in llm.astream(\"Tell me a joke.\"):\n",
|
||||
" print(chunk, end=\"\", flush=True)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "langchain",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -15,7 +15,10 @@
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": true
|
||||
"collapsed": true,
|
||||
"pycharm": {
|
||||
"is_executing": true
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -28,13 +31,14 @@
|
||||
"collapsed": false
|
||||
},
|
||||
"source": [
|
||||
"To get GigaChat credentials you need to [create account](https://developers.sber.ru/studio/login) and [get access to API](https://developers.sber.ru/docs/ru/gigachat/api/integration)\n",
|
||||
"To get GigaChat credentials you need to [create account](https://developers.sber.ru/studio/login) and [get access to API](https://developers.sber.ru/docs/ru/gigachat/individuals-quickstart)\n",
|
||||
"\n",
|
||||
"## Example"
|
||||
]
|
||||
},
|
||||
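The credentials cell is elided in this hunk; a minimal sketch of what it usually contains (the `GIGACHAT_CREDENTIALS` variable name is an assumption based on the GigaChat integration):

```python
import getpass
import os

# GigaChat reads authorization data from this environment variable (assumed name)
if "GIGACHAT_CREDENTIALS" not in os.environ:
    os.environ["GIGACHAT_CREDENTIALS"] = getpass.getpass("GigaChat credentials: ")
```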
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
@@ -48,7 +52,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
@@ -56,12 +60,12 @@
|
||||
"source": [
|
||||
"from langchain_community.llms import GigaChat\n",
|
||||
"\n",
|
||||
"llm = GigaChat(verify_ssl_certs=False)"
|
||||
"llm = GigaChat(verify_ssl_certs=False, scope=\"GIGACHAT_API_PERS\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
@@ -84,8 +88,8 @@
|
||||
"\n",
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"\n",
|
||||
"generated = llm_chain.run(country=\"Russia\")\n",
|
||||
"print(generated)"
|
||||
"generated = llm_chain.invoke(input={\"country\": \"Russia\"})\n",
|
||||
"print(generated[\"text\"])"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
"id": "bead5ede-d9cc-44b9-b062-99c90a10cf40",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"A guide on using [Google Generative AI](https://developers.generativeai.google/) models with Langchain. Note: It's separate from Google Cloud Vertex AI [integration](https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm)."
|
||||
"A guide on using [Google Generative AI](https://developers.generativeai.google/) models with Langchain. Note: It's separate from Google Cloud Vertex AI [integration](/docs/integrations/llms/google_vertex_ai_palm)."
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"\n",
|
||||
"The [Hugging Face Model Hub](https://huggingface.co/models) hosts over 120k models, 20k datasets, and 50k demo apps (Spaces), all open source and publicly available, in an online platform where people can easily collaborate and build ML together.\n",
|
||||
"\n",
|
||||
"These can be called from LangChain either through this local pipeline wrapper or by calling their hosted inference endpoints through the HuggingFaceHub class. For more information on the hosted pipelines, see the [HuggingFaceHub](./huggingface_hub) notebook."
|
||||
"These can be called from LangChain either through this local pipeline wrapper or by calling their hosted inference endpoints through the HuggingFaceHub class."
|
||||
]
|
||||
},
|
||||
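The basic local-pipeline usage that this notebook builds on is elided between these hunks; it looks roughly like this sketch (the parameters mirror the OpenVINO cells shown further down):

```python
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

# Load a small model through the local transformers pipeline wrapper
hf = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 10},
)
print(hf.invoke("Once upon a time"))
```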
{
|
||||
@@ -256,7 +256,27 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!optimum-cli export openvino --model gpt2 ov_model"
|
||||
"!optimum-cli export openvino --model gpt2 ov_model_dir"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f7a6d21",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"It is recommended to apply 8 or 4-bit weight quantization to reduce inference latency and model footprint using `--weight-format`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "97088ea0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!optimum-cli export openvino --model gpt2 --weight-format int8 ov_model_dir # for 8-bit quantization\n",
|
||||
"\n",
|
||||
"!optimum-cli export openvino --model gpt2 --weight-format int4 ov_model_dir # for 4-bit quantization"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -267,7 +287,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ov_llm = HuggingFacePipeline.from_model_id(\n",
|
||||
" model_id=\"ov_model\",\n",
|
||||
" model_id=\"ov_model_dir\",\n",
|
||||
" task=\"text-generation\",\n",
|
||||
" backend=\"openvino\",\n",
|
||||
" model_kwargs={\"device\": \"CPU\", \"ov_config\": ov_config},\n",
|
||||
@@ -280,6 +300,38 @@
|
||||
"\n",
|
||||
"print(ov_chain.invoke({\"question\": question}))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a2c5726c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can get additional inference speed improvement with Dynamic Quantization of activations and KV-cache quantization. These options can be enabled with `ov_config` as follows:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a1f9c2c5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ov_config = {\n",
|
||||
" \"KV_CACHE_PRECISION\": \"u8\",\n",
|
||||
" \"DYNAMIC_QUANTIZATION_GROUP_SIZE\": \"32\",\n",
|
||||
" \"PERFORMANCE_HINT\": \"LATENCY\",\n",
|
||||
" \"NUM_STREAMS\": \"1\",\n",
|
||||
" \"CACHE_DIR\": \"\",\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
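To apply these options, pass the same `ov_config` dict through `model_kwargs` when loading the model, just as in the earlier cells of this notebook (a sketch reusing the `ov_model_dir` export from above):

```python
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

ov_llm = HuggingFacePipeline.from_model_id(
    model_id="ov_model_dir",
    task="text-generation",
    backend="openvino",
    model_kwargs={"device": "CPU", "ov_config": ov_config},  # ov_config from the cell above
    pipeline_kwargs={"max_new_tokens": 10},
)
```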
{
|
||||
"cell_type": "markdown",
|
||||
"id": "da9a9239",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For more information refer to [OpenVINO LLM guide](https://docs.openvino.ai/2024/openvino-workflow/generative-ai-models-guide.html)."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -208,11 +208,9 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.llms import LlamaCpp"
|
||||
"from langchain_community.llms import LlamaCpp\n",
|
||||
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler\n",
|
||||
"from langchain_core.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -329,10 +327,10 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = \"\"\"\n",
|
||||
"question = \"\"\"\n",
|
||||
"Question: A rap battle between Stephen Colbert and John Oliver\n",
|
||||
"\"\"\"\n",
|
||||
"llm.invoke(prompt)"
|
||||
"llm.invoke(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -360,7 +358,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
"llm_chain = prompt | llm"
|
||||
]
|
||||
},
|
||||
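Putting the migrated pieces together, a hedged end-to-end sketch of the LCEL form this diff moves to (the model path is a placeholder; the prompt mirrors the one used elsewhere in this notebook):

```python
from langchain_community.llms import LlamaCpp
from langchain_core.prompts import PromptTemplate

# Placeholder path; point this at a local GGUF model file
llm = LlamaCpp(model_path="/path/to/model.gguf", temperature=0.75)

prompt = PromptTemplate.from_template(
    "Question: {question}\n\nAnswer: Let's think step by step."
)
llm_chain = prompt | llm

question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
print(llm_chain.invoke({"question": question}))
```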
{
|
||||
@@ -406,7 +404,7 @@
|
||||
],
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
|
||||
"llm_chain.run(question)"
|
||||
"llm_chain.invoke({\"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -488,9 +486,9 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
|
||||
"llm_chain = prompt | llm\n",
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
|
||||
"llm_chain.run(question)"
|
||||
"llm_chain.invoke({\"question\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -710,7 +708,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.6"
|
||||
"version": "3.9.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
|
||||
@@ -105,7 +105,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](https://python.langchain.com/docs/expression_language/interface)"
|
||||
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](/docs/expression_language/interface)"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -12,12 +12,12 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 3,
|
||||
"id": "10ad9224",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-02-02T21:34:23.461332Z",
|
||||
"start_time": "2024-02-02T21:34:23.394461Z"
|
||||
"end_time": "2024-03-18T01:01:08.425930Z",
|
||||
"start_time": "2024-03-18T01:01:08.327196Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
@@ -41,7 +41,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 11,
|
||||
"id": "426ff912",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -1356,16 +1356,26 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Azure Cosmos DB Semantic Cache"
|
||||
],
|
||||
"id": "40624c26e86b57a4",
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"id": "40624c26e86b57a4"
|
||||
"source": [
|
||||
"## Azure Cosmos DB Semantic Cache\n",
|
||||
"\n",
|
||||
"You can use this integrated [vector database](https://learn.microsoft.com/en-us/azure/cosmos-db/vector-database) for caching."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4a9d592db01b11b2",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-03-18T01:01:32.014750Z",
|
||||
"start_time": "2024-03-18T01:01:31.955991Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.cache import AzureCosmosDBSemanticCache\n",
|
||||
@@ -1377,11 +1387,11 @@
|
||||
"\n",
|
||||
"# Read more about Azure CosmosDB Mongo vCore vector search here https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/vector-search\n",
|
||||
"\n",
|
||||
"INDEX_NAME = \"langchain-test-index\"\n",
|
||||
"NAMESPACE = \"langchain_test_db.langchain_test_collection\"\n",
|
||||
"CONNECTION_STRING = (\n",
|
||||
" \"Please provide your azure cosmos mongo vCore vector db connection string\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"DB_NAME, COLLECTION_NAME = NAMESPACE.split(\".\")\n",
|
||||
"\n",
|
||||
"# Default value for these params\n",
|
||||
@@ -1392,7 +1402,9 @@
|
||||
"m = 16\n",
|
||||
"ef_construction = 64\n",
|
||||
"ef_search = 40\n",
|
||||
"score_threshold = 0.1\n",
|
||||
"score_threshold = 0.9\n",
|
||||
"application_name = \"LANGCHAIN_CACHING_PYTHON\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"set_llm_cache(\n",
|
||||
" AzureCosmosDBSemanticCache(\n",
|
||||
@@ -1409,18 +1421,10 @@
|
||||
" ef_construction=ef_construction,\n",
|
||||
" ef_search=ef_search,\n",
|
||||
" score_threshold=score_threshold,\n",
|
||||
" application_name=application_name,\n",
|
||||
" )\n",
|
||||
")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-02-02T21:34:49.457001Z",
|
||||
"start_time": "2024-02-02T21:34:49.411293Z"
|
||||
}
|
||||
},
|
||||
"id": "4a9d592db01b11b2",
|
||||
"execution_count": 16
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
@@ -1429,15 +1433,15 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 43.4 ms, sys: 7.23 ms, total: 50.7 ms\n",
|
||||
"Wall time: 1.61 s\n"
|
||||
"CPU times: user 45.6 ms, sys: 19.7 ms, total: 65.3 ms\n",
|
||||
"Wall time: 2.29 s\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "\"\\n\\nWhy couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\""
|
||||
"text/plain": "'\\n\\nWhy was the math book sad? Because it had too many problems.'"
|
||||
},
|
||||
"execution_count": 17,
|
||||
"execution_count": 82,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -1450,47 +1454,46 @@
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-02-02T21:34:53.704234Z",
|
||||
"start_time": "2024-02-02T21:34:52.091096Z"
|
||||
"end_time": "2024-03-12T00:12:57.462226Z",
|
||||
"start_time": "2024-03-12T00:12:55.166201Z"
|
||||
}
|
||||
},
|
||||
"id": "8488cf9c97ec7ab",
|
||||
"execution_count": 17
|
||||
"id": "14ca942820e8140c",
|
||||
"execution_count": 82
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 6.89 ms, sys: 2.24 ms, total: 9.13 ms\n",
|
||||
"Wall time: 337 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "\"\\n\\nWhy couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\""
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-02-02T21:34:56.004502Z",
|
||||
"start_time": "2024-02-02T21:34:55.650136Z"
|
||||
}
|
||||
},
|
||||
"execution_count": 83,
|
||||
"id": "bc1570a2a77b58c8",
|
||||
"execution_count": 18
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2024-03-12T00:13:03.652755Z",
|
||||
"start_time": "2024-03-12T00:13:03.159428Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"CPU times: user 9.61 ms, sys: 3.42 ms, total: 13 ms\n",
|
||||
"Wall time: 474 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": "'\\n\\nWhy was the math book sad? Because it had too many problems.'"
|
||||
},
|
||||
"execution_count": 83,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%time\n",
|
||||
"# The first time, it is not yet in cache, so it should take longer\n",
|
||||
"llm(\"Tell me a joke\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
@@ -1741,7 +1744,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.17"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
@@ -6,22 +6,19 @@
|
||||
"source": [
|
||||
"# OctoAI\n",
|
||||
"\n",
|
||||
">[OctoML](https://docs.octoai.cloud/docs) is a service with efficient compute. It enables users to integrate their choice of AI models into applications. The `OctoAI` compute service helps you run, tune, and scale AI applications.\n",
|
||||
"[OctoAI](https://docs.octoai.cloud/docs) offers easy access to efficient compute and enables users to integrate their choice of AI models into applications. The `OctoAI` compute service helps you run, tune, and scale AI applications easily.\n",
|
||||
"\n",
|
||||
"This example goes over how to use LangChain to interact with `OctoAI` [LLM endpoints](https://octoai.cloud/templates)\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To run our example app, there are four simple steps to take:\n",
|
||||
"To run our example app, there are two simple steps to take:\n",
|
||||
"\n",
|
||||
"1. Clone the MPT-7B demo template to your OctoAI account by visiting <https://octoai.cloud/templates/mpt-7b-demo> then clicking \"Clone Template.\" \n",
|
||||
" 1. If you want to use a different LLM model, you can also containerize the model and make a custom OctoAI endpoint yourself, by following [Build a Container from Python](doc:create-custom-endpoints-from-python-code) and [Create a Custom Endpoint from a Container](doc:create-custom-endpoints-from-a-container)\n",
|
||||
"1. Get an API Token from [your OctoAI account page](https://octoai.cloud/settings).\n",
|
||||
" \n",
|
||||
"2. Paste your Endpoint URL in the code cell below\n",
|
||||
"2. Paste your API key in in the code cell below.\n",
|
||||
"\n",
|
||||
"3. Get an API Token from [your OctoAI account page](https://octoai.cloud/settings).\n",
|
||||
" \n",
|
||||
"4. Paste your API key in in the code cell below"
|
||||
"Note: If you want to use a different LLM model, you can containerize the model and make a custom OctoAI endpoint yourself, by following [Build a Container from Python](https://octo.ai/docs/bring-your-own-model/advanced-build-a-container-from-scratch-in-python) and [Create a Custom Endpoint from a Container](https://octo.ai/docs/bring-your-own-model/create-custom-endpoints-from-a-container/create-custom-endpoints-from-a-container) and then update your Endpoint URL in the code cell below.\n"
|
||||
]
|
||||
},
|
||||
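The setup cells are elided after this hunk; a hedged sketch of the two steps (the environment variable and class names follow the langchain-community OctoAI integration and should be treated as assumptions here):

```python
import getpass
import os

from langchain_community.llms.octoai_endpoint import OctoAIEndpoint

# Step 2: paste the API token from your OctoAI account page
os.environ["OCTOAI_API_TOKEN"] = getpass.getpass("OctoAI API token: ")

llm = OctoAIEndpoint()  # may need endpoint/model kwargs depending on your setup
```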
{
|
||||
|
||||
@@ -175,7 +175,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](https://python.langchain.com/docs/expression_language/interface)"
|
||||
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](/docs/expression_language/interface)"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
249
docs/docs/integrations/llms/openvino.ipynb
Normal file
249
docs/docs/integrations/llms/openvino.ipynb
Normal file
@@ -0,0 +1,249 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "959300d4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenVINO Local Pipelines\n",
|
||||
"\n",
|
||||
"[OpenVINO™](https://github.com/openvinotoolkit/openvino) is an open-source toolkit for optimizing and deploying AI inference. The OpenVINO™ Runtime can infer models on different hardware [devices](https://github.com/openvinotoolkit/openvino?tab=readme-ov-file#supported-hardware-matrix). It can help to boost deep learning performance in computer vision, automatic speech recognition, natural language processing and other common tasks.\n",
|
||||
"\n",
|
||||
"OpenVINO models can be run locally through the `HuggingFacePipeline` [class](https://python.langchain.com/docs/integrations/llms/huggingface_pipeline). To deploy a model with OpenVINO, you can specify the `backend=\"openvino\"` parameter to trigger OpenVINO as backend inference framework."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4c1b8450-5eaf-4d34-8341-2d785448a1ff",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"source": [
|
||||
"To use, you should have the ``optimum-intel`` with OpenVINO Accelerator python [package installed](https://github.com/huggingface/optimum-intel?tab=readme-ov-file#installation)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d772b637-de00-4663-bd77-9bc96d798db2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install --upgrade-strategy eager \"optimum[openvino,nncf]\" --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "91ad075f-71d5-4bc8-ab91-cc0ad5ef16bb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Model Loading\n",
|
||||
"\n",
|
||||
"Models can be loaded by specifying the model parameters using the `from_model_id` method.\n",
|
||||
"\n",
|
||||
"If you have an Intel GPU, you can specify `model_kwargs={\"device\": \"GPU\"}` to run inference on it."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "165ae236-962a-4763-8052-c4836d78a5d2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline\n",
|
||||
"\n",
|
||||
"ov_config = {\"PERFORMANCE_HINT\": \"LATENCY\", \"NUM_STREAMS\": \"1\", \"CACHE_DIR\": \"\"}\n",
|
||||
"\n",
|
||||
"ov_llm = HuggingFacePipeline.from_model_id(\n",
|
||||
" model_id=\"gpt2\",\n",
|
||||
" task=\"text-generation\",\n",
|
||||
" backend=\"openvino\",\n",
|
||||
" model_kwargs={\"device\": \"CPU\", \"ov_config\": ov_config},\n",
|
||||
" pipeline_kwargs={\"max_new_tokens\": 10},\n",
|
||||
")"
|
||||
]
|
||||
},
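{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once loaded, the model can be queried on its own: `HuggingFacePipeline` is a LangChain `Runnable`, so `invoke` accepts a plain prompt string. A minimal usage sketch (the prompt below is an illustrative example, not from the original notebook):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Query the OpenVINO-backed pipeline directly; `invoke` returns the\n",
"# generated continuation as a plain string.\n",
"print(ov_llm.invoke(\"What is deep learning?\"))"
]
},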
{
"cell_type": "markdown",
"id": "00104b27-0c15-4a97-b198-4512337ee211",
"metadata": {},
"source": [
"They can also be loaded by passing in an existing `optimum-intel` pipeline directly:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f426a4f",
"metadata": {},
"outputs": [],
"source": [
"from optimum.intel.openvino import OVModelForCausalLM\n",
"from transformers import AutoTokenizer, pipeline\n",
"\n",
"model_id = \"gpt2\"\n",
"device = \"CPU\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
"ov_model = OVModelForCausalLM.from_pretrained(\n",
"    model_id, device=device, ov_config=ov_config\n",
")\n",
"ov_pipe = pipeline(\n",
"    \"text-generation\", model=ov_model, tokenizer=tokenizer, max_new_tokens=10\n",
")\n",
"hf = HuggingFacePipeline(pipeline=ov_pipe)"
]
},
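{
"cell_type": "markdown",
"metadata": {},
"source": [
"Either wrapper can then be used the same way. As a quick sanity check (an assumed example, not part of the original notebook), the pipeline-based instance answers `invoke` calls just like the one built with `from_model_id`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The wrapper around the pre-built optimum-intel pipeline is also a\n",
"# Runnable, so it supports the same invocation interface.\n",
"print(hf.invoke(\"What is deep learning?\"))"
]
},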
{
"cell_type": "markdown",
"id": "60e7ba8d",
"metadata": {},
"source": [
"### Create Chain\n",
"\n",
"With the model loaded into memory, you can compose it with a prompt to\n",
"form a chain."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3acf0069",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"chain = prompt | ov_llm\n",
"\n",
"question = \"What is electroencephalography?\"\n",
"\n",
"print(chain.invoke({\"question\": question}))"
]
},
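{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because the chain is a standard LCEL `Runnable`, it also exposes `stream`. A hedged sketch: depending on the backend, `HuggingFacePipeline` may yield the whole completion as a single chunk rather than token by token, but the call pattern is the same:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Stream the chain output; this falls back to a single chunk if the\n",
"# underlying pipeline does not support incremental generation.\n",
"for chunk in chain.stream({\"question\": question}):\n",
"    print(chunk, end=\"\", flush=True)"
]
},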
{
"cell_type": "markdown",
"id": "12524837-e9ab-455a-86be-66b95f4f893a",
"metadata": {},
"source": [
"### Inference with local OpenVINO model\n",
"\n",
"It is possible to [export your model](https://github.com/huggingface/optimum-intel?tab=readme-ov-file#export) to the OpenVINO IR format with the CLI and load the model from a local folder.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d1104a2-79c7-43a6-aa1c-8076a5ad7747",
"metadata": {},
"outputs": [],
"source": [
"!optimum-cli export openvino --model gpt2 ov_model_dir"
]
},
{
"cell_type": "markdown",
"id": "0f7a6d21",
"metadata": {},
"source": [
"It is recommended to apply 8- or 4-bit weight quantization to reduce inference latency and model footprint, using the `--weight-format` option:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "97088ea0",
"metadata": {},
"outputs": [],
"source": [
"!optimum-cli export openvino --model gpt2 --weight-format int8 ov_model_dir  # for 8-bit quantization\n",
"\n",
"!optimum-cli export openvino --model gpt2 --weight-format int4 ov_model_dir  # for 4-bit quantization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ac71e60d-5595-454e-8602-03ebb0248205",
"metadata": {},
"outputs": [],
"source": [
"ov_llm = HuggingFacePipeline.from_model_id(\n",
"    model_id=\"ov_model_dir\",\n",
"    task=\"text-generation\",\n",
"    backend=\"openvino\",\n",
"    model_kwargs={\"device\": \"CPU\", \"ov_config\": ov_config},\n",
"    pipeline_kwargs={\"max_new_tokens\": 10},\n",
")\n",
"\n",
"ov_chain = prompt | ov_llm\n",
"\n",
"question = \"What is electroencephalography?\"\n",
"\n",
"print(ov_chain.invoke({\"question\": question}))"
]
},
{
"cell_type": "markdown",
"id": "a2c5726c",
"metadata": {},
"source": [
"You can get an additional inference speed improvement with dynamic quantization of activations and KV-cache quantization. These options can be enabled with `ov_config` as follows:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1f9c2c5",
"metadata": {},
"outputs": [],
"source": [
"ov_config = {\n",
"    \"KV_CACHE_PRECISION\": \"u8\",\n",
"    \"DYNAMIC_QUANTIZATION_GROUP_SIZE\": \"32\",\n",
"    \"PERFORMANCE_HINT\": \"LATENCY\",\n",
"    \"NUM_STREAMS\": \"1\",\n",
"    \"CACHE_DIR\": \"\",\n",
"}"
]
},
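{
"cell_type": "markdown",
"metadata": {},
"source": [
"For the updated `ov_config` to take effect, it has to be passed back into the pipeline. A minimal sketch, reusing the parameters from the earlier cells (an assumption on our part, since the original notebook stops at the config):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Recreate the pipeline so the new ov_config (u8 KV cache, dynamic\n",
"# quantization of activations) is applied when the model is compiled.\n",
"ov_llm = HuggingFacePipeline.from_model_id(\n",
"    model_id=\"ov_model_dir\",\n",
"    task=\"text-generation\",\n",
"    backend=\"openvino\",\n",
"    model_kwargs={\"device\": \"CPU\", \"ov_config\": ov_config},\n",
"    pipeline_kwargs={\"max_new_tokens\": 10},\n",
")"
]
},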
{
"cell_type": "markdown",
"id": "da9a9239",
"metadata": {},
"source": [
"For more information, refer to the [OpenVINO LLM guide](https://docs.openvino.ai/2024/openvino-workflow/generative-ai-models-guide.html)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -14,7 +14,7 @@
"\n",
"This example showcases how to connect to [PromptLayer](https://www.promptlayer.com) to start recording your OpenAI requests.\n",
"\n",
-"Another example is [here](https://python.langchain.com/docs/integrations/providers/promptlayer)."
+"Another example is [here](/docs/integrations/providers/promptlayer)."
]
},
{
@@ -290,7 +290,7 @@
"metadata": {},
"source": [
"## Streaming Response\n",
-"You can optionally stream the response as it is produced, which is helpful to show interactivity to users for time-consuming generations. See detailed docs on [Streaming](https://python.langchain.com/docs/modules/model_io/llms/how_to/streaming_llm) for more information."
+"You can optionally stream the response as it is produced, which helps show interactivity to users during time-consuming generations. See the detailed docs on [Streaming](/docs/modules/model_io/llms/streaming_llm) for more information."
]
},
{
@@ -540,9 +540,9 @@
],
"metadata": {
"kernelspec": {
-"display_name": "poetry-venv",
+"display_name": "Python 3 (ipykernel)",
"language": "python",
-"name": "poetry-venv"
+"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -13,7 +13,7 @@
"https://api.together.xyz/settings/api-keys. This can be passed in as init param\n",
"``together_api_key`` or set as environment variable ``TOGETHER_API_KEY``.\n",
"\n",
-"Together API reference: https://docs.together.ai/reference/inference"
+"Together API reference: https://docs.together.ai/reference"
]
},
{
@@ -96,7 +96,7 @@
"source": [
"country = \"Russia\"\n",
"\n",
-"llm_chain.run(country)"
+"llm_chain.invoke(country)"
]
}
],
Some files were not shown because too many files have changed in this diff.