Mirror of https://github.com/hwchase17/langchain.git
Synced 2026-02-18 04:25:22 +00:00

Compare commits: 435 commits, harrison/r ... v0.0.281
.github/CONTRIBUTING.md (10 changed lines)

@@ -44,7 +44,7 @@ If you are adding an issue, please try to keep it focused on a single, modular b
If two issues are related, or blocking, please link them rather than combining them.

We will try to keep these issues as up to date as possible, though
with the rapid rate of develop in this field some may get out of date.
with the rapid rate of development in this field some may get out of date.
If you notice this happening, please let us know.

### 🙋Getting Help

@@ -80,14 +80,14 @@ For example, to contribute to `langchain` run `cd libs/langchain` before getting
To install requirements:

```bash
poetry install -E all
poetry install --with test
```

This will install all requirements for running the package, examples, linting, formatting, tests, and coverage. Note the `-E all` flag will install all optional dependencies necessary for integration testing.
This will install all requirements for running the package, examples, linting, formatting, tests, and coverage.

❗Note: If during installation you receive a `WheelFileValidationError` for `debugpy`, please make sure you are running Poetry v1.5.1. This bug was present in older versions of Poetry (e.g. 1.4.1) and has been resolved in newer releases. If you are still seeing this bug on v1.5.1, you may also try disabling "modern installation" (`poetry config installer.modern-installation false`) and re-installing requirements. See [this `debugpy` issue](https://github.com/microsoft/debugpy/issues/1246) for more details.

Now, you should be able to run the common tasks in the following section. To double check, run `make test`, all tests should pass. If they don't you may need to pip install additional dependencies, such as `numexpr` and `openapi_schema_pydantic`.
Now assuming `make` and `pytest` are installed, you should be able to run the common tasks in the following section. To double check, run `make test` under `libs/langchain`, all tests should pass. If they don't, you may need to pip install additional dependencies, such as `numexpr` and `openapi_schema_pydantic`.

## ✅ Common Tasks

@@ -134,7 +134,7 @@ We recognize linting can be annoying - if you do not want to do it, please conta
### Spellcheck

Spellchecking for this project is done via [codespell](https://github.com/codespell-project/codespell).
Note that `codespell` finds common typos, so could have false-positive (correctly spelled but rarely used) and false-negatives (not finding misspelled) words.
Note that `codespell` finds common typos, so it could have false-positive (correctly spelled but rarely used) and false-negatives (not finding misspelled) words.

To check spelling for this project:
.github/actions/poetry_setup/action.yml (58 changed lines)

@@ -15,66 +15,52 @@ inputs:
description: Poetry version
required: true

install-command:
description: Command run for installing dependencies
required: false
default: poetry install

cache-key:
description: Cache key to use for manual handling of caching
required: true

working-directory:
description: Directory to run install-command in
required: false
default: ""
description: Directory whose poetry.lock file should be cached
required: true

runs:
using: composite
steps:
- uses: actions/setup-python@v4
name: Setup python $${ inputs.python-version }}
name: Setup python ${{ inputs.python-version }}
with:
python-version: ${{ inputs.python-version }}

- uses: actions/cache@v3
id: cache-pip
name: Cache Pip ${{ inputs.python-version }}
id: cache-bin-poetry
name: Cache Poetry binary - Python ${{ inputs.python-version }}
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
with:
path: |
~/.cache/pip
key: pip-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}
/opt/pipx/venvs/poetry
/opt/pipx_bin/poetry
# This step caches the poetry installation, so make sure it's keyed on the poetry version as well.
key: bin-poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-${{ inputs.poetry-version }}

- run: pipx install poetry==${{ inputs.poetry-version }} --python python${{ inputs.python-version }}
- name: Install poetry
if: steps.cache-bin-poetry.outputs.cache-hit != 'true'
shell: bash

- name: Check Poetry File
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
poetry check

- name: Check lock file
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
poetry lock --check

- uses: actions/cache@v3
id: cache-poetry
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
POETRY_VERSION: ${{ inputs.poetry-version }}
PYTHON_VERSION: ${{ inputs.python-version }}
run: pipx install "poetry==$POETRY_VERSION" --python "python$PYTHON_VERSION" --verbose

- name: Restore pip and poetry cached dependencies
uses: actions/cache@v3
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "4"
WORKDIR: ${{ inputs.working-directory == '' && '.' || inputs.working-directory }}
with:
path: |
~/.cache/pip
~/.cache/pypoetry/virtualenvs
~/.cache/pypoetry/cache
~/.cache/pypoetry/artifacts
${{ env.WORKDIR }}/.venv
key: poetry-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}

- run: ${{ inputs.install-command }}
working-directory: ${{ inputs.working-directory }}
shell: bash
key: py-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ inputs.python-version }}-poetry-${{ inputs.poetry-version }}-${{ inputs.cache-key }}-${{ hashFiles(format('{0}/**/poetry.lock', env.WORKDIR)) }}
.github/workflows/_lint.yml (43 changed lines)

@@ -80,31 +80,32 @@ jobs:
find "$WORKDIR" -name '*.py' -type f -not -newermt "$OLDEST_COMMIT_TIME" -exec touch -c -m -t '200001010000' '{}' '+'

echo "oldest-commit=$OLDEST_COMMIT" >> "$GITHUB_OUTPUT"
- uses: actions/cache@v3
id: cache-pip
name: Cache langchain editable pip install - ${{ matrix.python-version }}
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
with:
path: |
~/.cache/pip
key: pip-editable-langchain-deps-${{ runner.os }}-${{ runner.arch }}-py-${{ matrix.python-version }}
- name: Install poetry
run: |
pipx install "poetry==$POETRY_VERSION"
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"

- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
cache: poetry
cache-dependency-path: |
${{ env.WORKDIR }}/**/poetry.lock
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: lint

- name: Check Poetry File
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
poetry check

- name: Check lock file
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
poetry lock --check

- name: Install dependencies
working-directory: ${{ inputs.working-directory }}
run: |
poetry install

- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}
if: ${{ inputs.working-directory != 'libs/langchain' }}

@@ -115,7 +116,7 @@
uses: actions/cache@v3
env:
CACHE_BASE: black-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "1"
with:
path: |
${{ env.WORKDIR }}/.black_cache

@@ -127,7 +128,7 @@
- name: Get .mypy_cache to speed up mypy
uses: actions/cache@v3
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "15"
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
with:
path: |
${{ env.WORKDIR }}/.mypy_cache
.github/workflows/_pydantic_compatibility.yml (new file, 81 lines)

@@ -0,0 +1,81 @@
name: pydantic v1/v2 compatibility

on:
workflow_call:
inputs:
working-directory:
required: true
type: string
description: "From which folder this pipeline executes"

env:
POETRY_VERSION: "1.5.1"

jobs:
build:
defaults:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
name: Pydantic v1/v2 compatibility - Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: pydantic-cross-compat

- name: Install dependencies
shell: bash
run: poetry install

- name: Install the opposite major version of pydantic
# If normal tests use pydantic v1, here we'll use v2, and vice versa.
shell: bash
run: |
# Determine the major part of pydantic version
REGULAR_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)

if [[ "$REGULAR_VERSION" == "1" ]]; then
PYDANTIC_DEP=">=2.1,<3"
TEST_WITH_VERSION="2"
elif [[ "$REGULAR_VERSION" == "2" ]]; then
PYDANTIC_DEP="<2"
TEST_WITH_VERSION="1"
else
echo "Unexpected pydantic major version '$REGULAR_VERSION', cannot determine which version to use for cross-compatibility test."
exit 1
fi

# Install via `pip` instead of `poetry add` to avoid changing lockfile,
# which would prevent caching from working: the cache would get saved
# to a different key than where it gets loaded from.
poetry run pip install "pydantic${PYDANTIC_DEP}"

# Ensure that the correct pydantic is installed now.
echo "Checking pydantic version... Expecting ${TEST_WITH_VERSION}"

# Determine the major part of pydantic version
CURRENT_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)

# Check that the major part of pydantic version is as expected, if not
# raise an error
if [[ "$CURRENT_VERSION" != "$TEST_WITH_VERSION" ]]; then
echo "Error: expected pydantic version ${CURRENT_VERSION} to have been installed, but found: ${TEST_WITH_VERSION}"
exit 1
fi
echo "Found pydantic version ${CURRENT_VERSION}, as expected"
- name: Run pydantic compatibility tests
shell: bash
run: make test
.github/workflows/_release.yml (15 changed lines)

@@ -23,18 +23,23 @@ jobs:
# Trusted publishing has to also be configured on PyPI for each package:
# https://docs.pypi.org/trusted-publishers/adding-a-publisher/
id-token: write

# This permission is needed by `ncipollo/release-action` to create the GitHub release.
contents: write
defaults:
run:
working-directory: ${{ inputs.working-directory }}
steps:
- uses: actions/checkout@v3
- name: Install poetry
run: pipx install "poetry==$POETRY_VERSION"
- name: Set up Python 3.10
uses: actions/setup-python@v4

- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: "3.10"
cache: "poetry"
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: release

- name: Build project for distribution
run: poetry build
- name: Check Version
.github/workflows/_test.yml (63 changed lines)

@@ -7,10 +7,6 @@ on:
required: true
type: string
description: "From which folder this pipeline executes"
test_type:
type: string
description: "Test types to run"
default: '["core", "extended", "core-pydantic-2"]'

env:
POETRY_VERSION: "1.5.1"

@@ -28,61 +24,22 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
test_type: ${{ fromJSON(inputs.test_type) }}
name: Python ${{ matrix.python-version }} ${{ matrix.test_type }}
name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}

- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
working-directory: ${{ inputs.working-directory }}
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: ${{ matrix.test_type }}
install-command: |
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install
elif [ "${{ matrix.test_type }}" == "core-pydantic-2" ]; then
echo "Running core-pydantic-v2 tests, installing dependencies with poetry..."
poetry install
working-directory: ${{ inputs.working-directory }}
cache-key: core

# Install via `pip` instead of `poetry add` to avoid changing lockfile,
# which would prevent caching from working: the cache would get saved
# to a different key than where it gets loaded from.
poetry run pip install 'pydantic>=2.1,<3'
else
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
fi
- name: Verify pydantic version
run: |
if [ "${{ matrix.test_type }}" == "core-pydantic-2" ]; then
EXPECTED_VERSION=2
else
EXPECTED_VERSION=1
fi
echo "Checking pydantic version... Expecting ${EXPECTED_VERSION}"

# Determine the major part of pydantic version
VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)

# Check that the major part of pydantic version is as expected, if not
# raise an error
if [[ "$VERSION" -ne $EXPECTED_VERSION ]]; then
echo "Error: pydantic version must be equal to ${EXPECTED_VERSION}; Found: ${VERSION}"
exit 1
fi
echo "Found pydantic version ${VERSION}, as expected"
- name: Install dependencies
shell: bash
- name: Run ${{matrix.test_type}} tests
run: |
case "${{ matrix.test_type }}" in
core | core-pydantic-2)
make test
;;
*)
make extended_tests
;;
esac
run: poetry install

- name: Run core tests
shell: bash
run: make test
.github/workflows/langchain_ci.yml (59 changed lines)

@@ -8,10 +8,25 @@ on:
paths:
- '.github/workflows/_lint.yml'
- '.github/workflows/_test.yml'
- '.github/workflows/_pydantic_compatibility.yml'
- '.github/workflows/langchain_ci.yml'
- 'libs/langchain/**'
workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

env:
POETRY_VERSION: "1.5.1"
WORKDIR: "libs/langchain"

jobs:
lint:
uses:

@@ -19,10 +34,50 @@ jobs:
with:
working-directory: libs/langchain
secrets: inherit

test:
uses:
./.github/workflows/_test.yml
with:
working-directory: libs/langchain
test_type: '["core", "extended", "core-pydantic-2"]'
secrets: inherit

pydantic-compatibility:
uses:
./.github/workflows/_pydantic_compatibility.yml
with:
working-directory: libs/langchain
secrets: inherit

extended-tests:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ env.WORKDIR }}
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
name: Python ${{ matrix.python-version }} extended tests
steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: libs/langchain
cache-key: extended

- name: Install dependencies
shell: bash
run: |
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing

- name: Run extended tests
run: make extended_tests
.github/workflows/langchain_experimental_ci.yml (90 changed lines)

@@ -13,6 +13,20 @@ on:
- 'libs/experimental/**'
workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

env:
POETRY_VERSION: "1.5.1"
WORKDIR: "libs/experimental"

jobs:
lint:
uses:

@@ -20,10 +34,82 @@ jobs:
with:
working-directory: libs/experimental
secrets: inherit

test:
uses:
./.github/workflows/_test.yml
with:
working-directory: libs/experimental
test_type: '["core"]'
secrets: inherit

# It's possible that langchain-experimental works fine with the latest *published* langchain,
# but is broken with the langchain on `master`.
#
# We want to catch situations like that *before* releasing a new langchain, hence this test.
test-with-latest-langchain:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ env.WORKDIR }}
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
name: test with unpublished langchain - Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ env.WORKDIR }}
cache-key: unpublished-langchain

- name: Install dependencies
shell: bash
run: |
echo "Running tests with unpublished langchain, installing dependencies with poetry..."
poetry install

echo "Editably installing langchain outside of poetry, to avoid messing up lockfile..."
poetry run pip install -e ../langchain

- name: Run tests
run: make test
extended-tests:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ env.WORKDIR }}
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
name: Python ${{ matrix.python-version }} extended tests
steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: libs/experimental
cache-key: extended

- name: Install dependencies
shell: bash
run: |
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing

- name: Run extended tests
run: make extended_tests
.github/workflows/scheduled_test.yml (15 changed lines)

@@ -25,18 +25,25 @@ jobs:
name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: libs/langchain
install-command: |
echo "Running scheduled tests, installing dependencies with poetry..."
poetry install --with=test_integration
cache-key: scheduled

- name: Install dependencies
working-directory: libs/langchain
shell: bash
run: |
echo "Running scheduled tests, installing dependencies with poetry..."
poetry install --with=test_integration

- name: Run tests
shell: bash
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
make scheduled_tests
shell: bash
@@ -156,7 +156,7 @@ html_context = {
html_static_path = ["_static"]

# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
# or fully qualified paths (e.g. https://...)
html_css_files = [
"css/custom.css",
]

@@ -228,7 +228,7 @@ Classes
:toctree: {module}
"""

for class_ in classes:
for class_ in sorted(classes, key=lambda c: c["qualified_name"]):
if not class_["is_public"]:
continue
@@ -338,10 +338,11 @@
"Neptune Open Cypher QA Chain": "https://python.langchain.com/docs/use_cases/more/graph/neptune_cypher_qa",
"NebulaGraphQAChain": "https://python.langchain.com/docs/use_cases/more/graph/graph_nebula_qa",
"KuzuQAChain": "https://python.langchain.com/docs/use_cases/more/graph/graph_kuzu_qa",
"FalkorDBQAChain": "https://python.langchain.com/docs/use_cases/more/graph/graph_falkordb_qa",
"HugeGraph QA Chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_hugegraph_qa",
"GraphSparqlQAChain": "https://python.langchain.com/docs/use_cases/more/graph/graph_sparql_qa",
"ArangoDB QA chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_arangodb_qa",
"Graph DB QA chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_cypher_qa",
"Neo4j DB QA chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_cypher_qa",
"How to use a SmartLLMChain": "https://python.langchain.com/docs/use_cases/more/self_check/smart_llm",
"Multi-Agent Simulated Environment: Petting Zoo": "https://python.langchain.com/docs/use_cases/agent_simulations/petting_zoo",
"Multi-agent decentralized speaker selection": "https://python.langchain.com/docs/use_cases/agent_simulations/multiagent_bidding",

@@ -2071,8 +2072,8 @@
"PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer",
"PromptLayer OpenAI": "https://python.langchain.com/docs/integrations/llms/promptlayer_openai"
},
"DeepLake": {
"Deep Lake": "https://python.langchain.com/docs/integrations/providers/deeplake",
"Activeloop DeepLake": {
"Deep Lake": "https://python.langchain.com/docs/integrations/providers/activeloop_deeplake",
"Activeloop's Deep Lake": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake",
"Analysis of Twitter the-algorithm source code with LangChain, GPT4 and Activeloop's Deep Lake": "https://python.langchain.com/docs/use_cases/question_answering/how_to/code/twitter-the-algorithm-analysis-deeplake",
"Use LangChain, GPT and Activeloop's Deep Lake to work with code base": "https://python.langchain.com/docs/use_cases/question_answering/how_to/code/code-analysis-deeplake",

@@ -3174,6 +3175,12 @@
"KuzuQAChain": {
"KuzuQAChain": "https://python.langchain.com/docs/use_cases/more/graph/graph_kuzu_qa"
},
"FalkorDBGraph": {
"KuzuQAChain": "https://python.langchain.com/docs/use_cases/more/graph/graph_falkordb_qa"
},
"FalkorDBQAChain": {
"FalkorDB QA Chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_falkordb_qa"
},
"HugeGraphQAChain": {
"HugeGraph QA Chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_hugegraph_qa"
},

@@ -3202,10 +3209,10 @@
"Graph QA": "https://python.langchain.com/docs/use_cases/more/graph/graph_qa"
},
"GraphCypherQAChain": {
"Graph DB QA chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_cypher_qa"
"Neo4j DB QA chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_cypher_qa"
},
"Neo4jGraph": {
"Graph DB QA chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_cypher_qa"
"Neo4j DB QA chain": "https://python.langchain.com/docs/use_cases/more/graph/graph_cypher_qa"
},
"LLMBashChain": {
"Bash chain": "https://python.langchain.com/docs/use_cases/more/code_writing/llm_bash"
@@ -5,9 +5,10 @@
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="Refresh" content="0; url={{ redirect }}" />
<meta name="Description" content="scikit-learn: machine learning in Python">
<meta name="robots" content="follow, index">
<meta name="Description" content="Python API reference for LangChain.">
<link rel="canonical" href="{{ redirect }}" />
<title>scikit-learn: machine learning in Python</title>
<title>LangChain Python API Reference Documentation.</title>
</head>
<body>
<p>You will be automatically redirected to the <a href="{{ redirect }}">new location of this page</a>.</p>
@@ -47,8 +47,8 @@ If you’re working on something you’re proud of, and think the LangChain comm

Here’s where our team hangs out, talks shop, spotlights cool work, and shares what we’re up to. We’d love to see you there too.

- **[Twitter](https://twitter.com/LangChainAI):** we post about what we’re working on and what cool things we’re seeing in the space. If you tag @langchainai in your post, we’ll almost certainly see it, and can snow you some love!
- **[Discord](https://discord.gg/6adMQxSpJS):** connect with with >30k developers who are building with LangChain
- **[Twitter](https://twitter.com/LangChainAI):** we post about what we’re working on and what cool things we’re seeing in the space. If you tag @langchainai in your post, we’ll almost certainly see it, and can show you some love!
- **[Discord](https://discord.gg/6adMQxSpJS):** connect with >30k developers who are building with LangChain
- **[GitHub](https://github.com/langchain-ai/langchain):** open pull requests, contribute to a discussion, and/or contribute
- **[Subscribe to our bi-weekly Release Notes](https://6w1pwbss0py.typeform.com/to/KjZB1auB):** a twice/month email roundup of the coolest things going on in our orbit
- **Slack:** if you’re building an application in production at your company, we’d love to get into a Slack channel together. Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) and we’ll get in touch about setting one up.
docs/docs_skeleton/docs/expression_language/index.mdx (new file, 14 lines)

@@ -0,0 +1,14 @@
---
sidebar_class_name: hidden
---

# LangChain Expression Language (LCEL)

LangChain Expression Language or LCEL is a declarative way to easily compose chains together.
Any chain constructed this way will automatically have full sync, async, and streaming support.

#### [Interface](/docs/expression_language/interface)
The base interface shared by all LCEL objects

#### [Cookbook](/docs/expression_language/cookbook)
Examples of common LCEL usage patterns
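A minimal sketch of what such a composed chain can look like, assuming the `langchain` version in this snapshot and an OpenAI API key in the environment; the prompt text is illustrative only:

```python
# Compose a prompt, a chat model, and an output parser with the `|` operator.
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
model = ChatOpenAI()
chain = prompt | model | StrOutputParser()

# Chains built this way support sync, async, and streaming calls.
print(chain.invoke({"topic": "bears"}))
```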
@@ -42,23 +42,22 @@ Log and stream intermediate steps of any chain
## Examples, ecosystem, and resources
### [Use cases](/docs/use_cases/)
Walkthroughs and best-practices for common end-to-end use cases, like:
- [Chatbots](/docs/use_cases/chatbots/)
- [Chatbots](/docs/use_cases/chatbots)
- [Answering questions using sources](/docs/use_cases/question_answering/)
- [Analyzing structured data](/docs/use_cases/tabular.html)
- [Analyzing structured data](/docs/use_cases/sql)
- and much more...

### [Guides](/docs/guides/)
Learn best practices for developing with LangChain.

### [Ecosystem](/docs/ecosystem/)
LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/docs/integrations/) and [dependent repos](/docs/ecosystem/dependents).
LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/docs/integrations/) and [dependent repos](/docs/additional_resources/dependents).

### [Additional resources](/docs/additional_resources/)
Our community is full of prolific developers, creative builders, and fantastic teachers. Check out [YouTube tutorials](/docs/additional_resources/youtube.html) for great tutorials from folks in the community, and [Gallery](https://github.com/kyrolabs/awesome-langchain) for a list of awesome LangChain projects, compiled by the folks at [KyroLabs](https://kyrolabs.com).

<h3><span style={{color:"#2e8555"}}> Support </span></h3>

Join us on [GitHub](https://github.com/hwchase17/langchain) or [Discord](https://discord.gg/6adMQxSpJS) to ask questions, share feedback, meet other developers building with LangChain, and dream about the future of LLM’s.
### [Community](/docs/community)
Head to the [Community navigator](/docs/community) to find places to ask questions, share feedback, meet other developers, and dream about the future of LLM’s.

## API reference
@@ -59,8 +59,8 @@ LangChain provides several objects to easily distinguish between different roles
If none of those roles sound right, there is also a `ChatMessage` class where you can specify the role manually.
For more information on how to use these different messages most effectively, see our prompting guide.

LangChain exposes a standard interface for both, but it's useful to understand this difference in order to construct prompts for a given language model.
The standard interface that LangChain exposes has two methods:
LangChain provides a standard interface for both, but it's useful to understand this difference in order to construct prompts for a given language model.
The standard interface that LangChain provides has two methods:
- `predict`: Takes in a string, returns a string
- `predict_messages`: Takes in a list of messages, returns a message.
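A small sketch of those two methods on both model types, assuming an OpenAI API key is configured; the model choices are illustrative:

```python
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.schema import HumanMessage

llm = OpenAI()
chat_model = ChatOpenAI()

# predict: takes a string, returns a string
llm.predict("Say hi!")
chat_model.predict("Say hi!")

# predict_messages: takes a list of messages, returns a message
messages = [HumanMessage(content="Say hi!")]
llm.predict_messages(messages)
chat_model.predict_messages(messages)
```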
@@ -107,7 +107,7 @@ import PromptTemplateChatModel from "@snippets/get_started/quickstart/prompt_tem
<PromptTemplateLLM/>

However, the advantages of using these over raw string formatting are several.
You can "partial" out variables - eg you can format only some of the variables at a time.
You can "partial" out variables - e.g. you can format only some of the variables at a time.
You can compose them together, easily combining different templates into a single prompt.
For explanations of these functionalities, see the [section on prompts](/docs/modules/model_io/prompts) for more detail.
@@ -121,12 +121,12 @@ Let's take a look at this below:

ChatPromptTemplates can also include other things besides ChatMessageTemplates - see the [section on prompts](/docs/modules/model_io/prompts) for more detail.

## Output Parsers
## Output parsers

OutputParsers convert the raw output of an LLM into a format that can be used downstream.
There are few main type of OutputParsers, including:

- Convert text from LLM -> structured information (eg JSON)
- Convert text from LLM -> structured information (e.g. JSON)
- Convert a ChatMessage into just a string
- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string.
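As a hedged illustration of the first bullet (text to structured information), one of the built-in parsers turns comma-separated output into a Python list:

```python
from langchain.output_parsers import CommaSeparatedListOutputParser

parser = CommaSeparatedListOutputParser()

# Instructions you would append to the prompt so the model emits parseable text.
format_instructions = parser.get_format_instructions()

# Parsing the raw LLM text into structured data.
parser.parse("red, green, blue")  # -> ['red', 'green', 'blue']
```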
@@ -149,7 +149,7 @@ import LLMChain from "@snippets/get_started/quickstart/llm_chain.mdx"

<LLMChain/>

## Next Steps
## Next steps

This is it!
We've now gone over how to create the core building block of LangChain applications - the LLMChains.
@@ -3,7 +3,7 @@ sidebar_position: 3
---
# Comparison Evaluators

Comparison evaluators in LangChain help measure two different chain or LLM outputs. These evaluators are helpful for comparative analyses, such as A/B testing between two language models, or comparing different versions of the same model. They can also be useful for things like generating preference scores for ai-assisted reinforcement learning.
Comparison evaluators in LangChain help measure two different chains or LLM outputs. These evaluators are helpful for comparative analyses, such as A/B testing between two language models, or comparing different versions of the same model. They can also be useful for things like generating preference scores for ai-assisted reinforcement learning.

These evaluators inherit from the `PairwiseStringEvaluator` class, providing a comparison interface for two strings - typically, the outputs from two different prompts or models, or two versions of the same model. In essence, a comparison evaluator performs an evaluation on a pair of strings and returns a dictionary containing the evaluation score and other relevant details.

@@ -16,7 +16,7 @@ Here's a summary of the key methods and properties of a comparison evaluator:
- `requires_input`: This property indicates whether this evaluator requires an input string.
- `requires_reference`: This property specifies whether this evaluator requires a reference label.

Detailed information about creating custom evaluators and the available built-in comparison evaluators are provided in the following sections.
Detailed information about creating custom evaluators and the available built-in comparison evaluators is provided in the following sections.

import DocCardList from "@theme/DocCardList";
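A sketch of the comparison interface described above, assuming an OpenAI-backed evaluator and an API key in the environment; the example strings are illustrative:

```python
from langchain.evaluation import load_evaluator

# "labeled_pairwise_string" compares two predictions against a reference label.
evaluator = load_evaluator("labeled_pairwise_string")

result = evaluator.evaluate_string_pairs(
    prediction="there are three dogs",
    prediction_b="4",
    input="how many dogs are in the park?",
    reference="four",
)
print(result)  # a dict containing the preferred output, a score, and reasoning
```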
@@ -1,7 +1,3 @@
---
sidebar_position: 6
---

import DocCardList from "@theme/DocCardList";

# Evaluation
@@ -1,9 +0,0 @@
# LangChain Expression Language

import DocCardList from "@theme/DocCardList";

LangChain Expression Language is a declarative way to easily compose chains together.
Any chain constructed this way will automatically have full sync, async, and streaming support.
See guides below for how to interact with chains constructed this way as well as cookbook examples.

<DocCardList />
docs/docs_skeleton/docs/guides/safety/amazon_comprehend_chain.ipynb (new file, 1422 lines)
File diff suppressed because it is too large.
@@ -1,6 +1,8 @@
# Preventing harmful outputs
# Moderation

One of the key concerns with using LLMs is that they may generate harmful or unethical text. This is an area of active research in the field. Here we present some built-in chains inspired by this research, which are intended to make the outputs of LLMs safer.

- [Moderation chain](/docs/use_cases/safety/moderation): Explicitly check if any output text is harmful and flag it.
- [Constitutional chain](/docs/use_cases/safety/constitutional_chain): Prompt the model with a set of principles which should guide it's behavior.
- [Moderation chain](/docs/guides/safety/moderation): Explicitly check if any output text is harmful and flag it.
- [Constitutional chain](/docs/guides/safety/constitutional_chain): Prompt the model with a set of principles which should guide it's behavior.
- [Logical Fallacy chain](/docs/guides/safety/logical_fallacy_chain): Checks the model output against logical fallacies to correct any deviation.
- [Amazon Comprehend moderation chain](/docs/guides/safety/amazon_comprehend_chain): Use [Amazon Comprehend](https://aws.amazon.com/comprehend/) to detect and handle PII and toxicity.
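A brief sketch of the first item in that list, assuming an OpenAI API key is configured:

```python
from langchain.chains import OpenAIModerationChain

moderation_chain = OpenAIModerationChain()

# Harmless input passes through unchanged; flagged input is replaced with a warning
# (or raises, if the chain is configured to error on violations).
moderation_chain.run("This is okay")
```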
@@ -0,0 +1,85 @@
# Removing logical fallacies from model output

Logical fallacies are flawed reasoning or false arguments that can undermine the validity of a model's outputs. Examples include circular reasoning, false dichotomies, ad hominem attacks, etc. Machine learning models are optimized to perform well on specific metrics like accuracy, perplexity, or loss. However, optimizing for metrics alone does not guarantee logically sound reasoning.

Language models can learn to exploit flaws in reasoning to generate plausible-sounding but logically invalid arguments. When models rely on fallacies, their outputs become unreliable and untrustworthy, even if they achieve high scores on metrics. Users cannot depend on such outputs. Propagating logical fallacies can spread misinformation, confuse users, and lead to harmful real-world consequences when models are deployed in products or services.

Monitoring and testing specifically for logical flaws is challenging unlike other quality issues. It requires reasoning about arguments rather than pattern matching.

Therefore, it is crucial that model developers proactively address logical fallacies after optimizing metrics. Specialized techniques like causal modeling, robustness testing, and bias mitigation can help avoid flawed reasoning. Overall, allowing logical flaws to persist makes models less safe and ethical. Eliminating fallacies ensures model outputs remain logically valid and aligned with human reasoning. This maintains user trust and mitigates risks.

```python
# Imports
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain_experimental.fallacy_removal.base import FallacyChain
```

```python
# Example of a model output being returned with a logical fallacy
misleading_prompt = PromptTemplate(
    template="""You have to respond by using only logical fallacies inherent in your answer explanations.

Question: {question}

Bad answer:""",
    input_variables=["question"],
)

llm = OpenAI(temperature=0)

misleading_chain = LLMChain(llm=llm, prompt=misleading_prompt)

misleading_chain.run(question="How do I know the earth is round?")
```

<CodeOutputBlock lang="python">

```
'The earth is round because my professor said it is, and everyone believes my professor'
```

</CodeOutputBlock>

```python
fallacies = FallacyChain.get_fallacies(["correction"])
fallacy_chain = FallacyChain.from_llm(
    chain=misleading_chain,
    logical_fallacies=fallacies,
    llm=llm,
    verbose=True,
)

fallacy_chain.run(question="How do I know the earth is round?")
```

<CodeOutputBlock lang="python">

```
> Entering new FallacyChain chain...
Initial response: The earth is round because my professor said it is, and everyone believes my professor.

Applying correction...

Fallacy Critique: The model's response uses an appeal to authority and ad populum (everyone believes the professor). Fallacy Critique Needed.

Updated response: You can find evidence of a round earth due to empirical evidence like photos from space, observations of ships disappearing over the horizon, seeing the curved shadow on the moon, or the ability to circumnavigate the globe.

> Finished chain.

'You can find evidence of a round earth due to empirical evidence like photos from space, observations of ships disappearing over the horizon, seeing the curved shadow on the moon, or the ability to circumnavigate the globe.'
```

</CodeOutputBlock>
@@ -37,11 +37,11 @@ This agent is designed to be used in conversational settings.
The prompt is designed to make the agent helpful and conversational.
It uses the ReAct framework to decide which tool to use, and uses memory to remember the previous conversation interactions.

### [Self ask with search](/docs/modules/agents/agent_types/self_ask_with_search.html)
### [Self-ask with search](/docs/modules/agents/agent_types/self_ask_with_search.html)

This agent utilizes a single tool that should be named `Intermediate Answer`.
This tool should be able to lookup factual answers to questions. This agent
is equivalent to the original [self ask with search paper](https://ofir.io/self-ask.pdf),
is equivalent to the original [self-ask with search paper](https://ofir.io/self-ask.pdf),
where a Google search API was provided as the tool.

### [ReAct document store](/docs/modules/agents/agent_types/react_docstore.html)

@@ -54,4 +54,4 @@ This agent is equivalent to the
original [ReAct paper](https://arxiv.org/pdf/2210.03629.pdf), specifically the Wikipedia example.

## [Plan-and-execute agents](/docs/modules/agents/agent_types/plan_and_execute.html)
Plan and execute agents accomplish an objective by first planning what to do, then executing the sub tasks. This idea is largely inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) and then the ["Plan-and-Solve" paper](https://arxiv.org/abs/2305.04091).
Plan-and-execute agents accomplish an objective by first planning what to do, then executing the sub tasks. This idea is largely inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) and then the ["Plan-and-Solve" paper](https://arxiv.org/abs/2305.04091).
@@ -1,6 +1,6 @@
# Plan and execute
# Plan-and-execute

Plan and execute agents accomplish an objective by first planning what to do, then executing the sub tasks. This idea is largely inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) and then the ["Plan-and-Solve" paper](https://arxiv.org/abs/2305.04091).
Plan-and-execute agents accomplish an objective by first planning what to do, then executing the sub tasks. This idea is largely inspired by [BabyAGI](https://github.com/yoheinakajima/babyagi) and then the ["Plan-and-Solve" paper](https://arxiv.org/abs/2305.04091).

The planning is almost always done by an LLM.
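A rough sketch of wiring such an agent together; the import paths assume the `langchain_experimental` package from this snapshot, and the empty tool list is a hypothetical placeholder:

```python
from langchain.chat_models import ChatOpenAI
from langchain_experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)

model = ChatOpenAI(temperature=0)
tools = []  # hypothetical placeholder: supply the tools your task actually needs

planner = load_chat_planner(model)                          # plans the sub-tasks
executor = load_agent_executor(model, tools, verbose=True)  # executes each sub-task
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)

agent.run("Write a haiku about planning, then count its syllables.")
```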
@@ -1,13 +1,13 @@
# Custom LLM Agent
# Custom LLM agent

This notebook goes through how to create your own custom LLM agent.

An LLM agent consists of three parts:

- PromptTemplate: This is the prompt template that can be used to instruct the language model on what to do
- `PromptTemplate`: This is the prompt template that can be used to instruct the language model on what to do
- LLM: This is the language model that powers the agent
- `stop` sequence: Instructs the LLM to stop generating as soon as this string is found
- OutputParser: This determines how to parse the LLMOutput into an AgentAction or AgentFinish object
- `OutputParser`: This determines how to parse the LLM output into an `AgentAction` or `AgentFinish` object

import Example from "@snippets/modules/agents/how_to/custom_llm_agent.mdx"
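A sketch of what the `OutputParser` piece might look like for a ReAct-style prompt format; the exact markers ("Final Answer:", "Action:", "Action Input:") are assumptions about the prompt you pair it with:

```python
import re
from typing import Union

from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish


class CustomOutputParser(AgentOutputParser):
    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        # The model signals completion with "Final Answer:".
        if "Final Answer:" in llm_output:
            return AgentFinish(
                return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
                log=llm_output,
            )
        # Otherwise expect "Action: <tool>" followed by "Action Input: <input>".
        match = re.search(r"Action:(.*?)\nAction Input:(.*)", llm_output, re.DOTALL)
        if not match:
            raise ValueError(f"Could not parse LLM output: `{llm_output}`")
        return AgentAction(
            tool=match.group(1).strip(),
            tool_input=match.group(2).strip().strip('"'),
            log=llm_output,
        )
```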
@@ -4,10 +4,10 @@ This notebook goes through how to create your own custom agent based on a chat m

An LLM chat agent consists of three parts:

- PromptTemplate: This is the prompt template that can be used to instruct the language model on what to do
- ChatModel: This is the language model that powers the agent
- `PromptTemplate`: This is the prompt template that can be used to instruct the language model on what to do
- `ChatModel`: This is the language model that powers the agent
- `stop` sequence: Instructs the LLM to stop generating as soon as this string is found
- OutputParser: This determines how to parse the LLMOutput into an AgentAction or AgentFinish object
- `OutputParser`: This determines how to parse the LLM output into an `AgentAction` or `AgentFinish` object

import Example from "@snippets/modules/agents/how_to/custom_llm_chat_agent.mdx"
@@ -3,7 +3,7 @@ sidebar_position: 2
|
||||
---
|
||||
# Documents
|
||||
|
||||
These are the core chains for working with Documents. They are useful for summarizing documents, answering questions over documents, extracting information from documents, and more.
|
||||
These are the core chains for working with documents. They are useful for summarizing documents, answering questions over documents, extracting information from documents, and more.
|
||||
|
||||
These chains all implement a common interface:
|
||||
|
||||
|
||||
@@ -3,10 +3,10 @@ sidebar_position: 1
|
||||
---
|
||||
# Refine
|
||||
|
||||
The refine documents chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
|
||||
The Refine documents chain constructs a response by looping over the input documents and iteratively updating its answer. For each document, it passes all non-document inputs, the current document, and the latest intermediate answer to an LLM chain to get a new answer.
|
||||
|
||||
Since the Refine chain only passes a single document to the LLM at a time, it is well-suited for tasks that require analyzing more documents than can fit in the model's context.
|
||||
The obvious tradeoff is that this chain will make far more LLM calls than, for example, the Stuff documents chain.
|
||||
There are also certain tasks which are difficult to accomplish iteratively. For example, the Refine chain can perform poorly when documents frequently cross-reference one another or when a task requires detailed information from many documents.
|
||||
|
||||

|
||||

|
||||
|
||||
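For a concrete feel of the loop described above, here is a minimal sketch using the prebuilt summarization helper with `chain_type="refine"`; it assumes an OpenAI API key is configured and uses tiny placeholder documents.

```python
from langchain.chains.summarize import load_summarize_chain
from langchain.llms import OpenAI
from langchain.schema import Document

docs = [
    Document(page_content=t)
    for t in ["First chunk of a long report ...", "Second chunk ...", "Third chunk ..."]
]

# Each document is folded into the running answer one at a time.
chain = load_summarize_chain(OpenAI(temperature=0), chain_type="refine")
print(chain.run(docs))
```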
@@ -1,11 +1,11 @@
|
||||
# LLM
|
||||
|
||||
An LLMChain is a simple chain that adds some functionality around language models. It is used widely throughout LangChain, including in other chains and agents.
|
||||
An `LLMChain` is a simple chain that adds some functionality around language models. It is used widely throughout LangChain, including in other chains and agents.
|
||||
|
||||
An LLMChain consists of a PromptTemplate and a language model (either an LLM or chat model). It formats the prompt template using the input key values provided (and also memory key values, if available), passes the formatted string to LLM and returns the LLM output.
|
||||
An `LLMChain` consists of a `PromptTemplate` and a language model (either an LLM or chat model). It formats the prompt template using the input key values provided (and also memory key values, if available), passes the formatted string to LLM and returns the LLM output.
|
||||
|
||||
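As a minimal sketch (assuming an OpenAI API key is set in the environment), an `LLMChain` is just a prompt template plus a model:

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "What is a good name for a company that makes {product}?"
)
chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)

# The input variables of the prompt become the input keys of the chain.
print(chain.run(product="colorful socks"))
```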
## Get started
|
||||
|
||||
import Example from "@snippets/modules/chains/foundational/llm_chain.mdx"
|
||||
|
||||
<Example/>
|
||||
<Example/>
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
The next step after calling a language model is to make a series of calls to a language model. This is particularly useful when you want to take the output from one call and use it as the input to another.
|
||||
|
||||
In this notebook we will walk through some examples for how to do this, using sequential chains. Sequential chains allow you to connect multiple chains and compose them into pipelines that execute some specific scenario.. There are two types of sequential chains:
|
||||
In this notebook we will walk through some examples of how to do this, using sequential chains. Sequential chains allow you to connect multiple chains and compose them into pipelines that execute a specific scenario. There are two types of sequential chains:
|
||||
|
||||
- `SimpleSequentialChain`: The simplest form of sequential chains, where each step has a singular input/output, and the output of one step is the input to the next.
|
||||
- `SequentialChain`: A more general form of sequential chains, allowing for multiple inputs/outputs.
|
||||
|
||||
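A minimal sketch of the first type above (`SimpleSequentialChain`), assuming an OpenAI API key is available; the prompts are purely illustrative:

```python
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

llm = OpenAI(temperature=0.7)

# Chain 1: write a synopsis for a play title.
synopsis_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(
        "Write a one-sentence synopsis for a play titled {title}."
    ),
)
# Chain 2: review the synopsis produced by chain 1.
review_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template("Write a short review of this synopsis: {synopsis}"),
)

# The single output of each step is fed as the single input of the next.
overall = SimpleSequentialChain(chains=[synopsis_chain, review_chain], verbose=True)
print(overall.run("Tragedy at Sunset on the Beach"))
```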
@@ -30,4 +30,4 @@ Chains allow us to combine multiple components together to create a single, cohe
|
||||
|
||||
import GetStarted from "@snippets/modules/chains/get_started.mdx"
|
||||
|
||||
<GetStarted/>
|
||||
<GetStarted/>
|
||||
|
||||
@@ -11,7 +11,7 @@ Use document loaders to load data from a source as `Document`'s. A `Document` is
|
||||
and associated metadata. For example, there are document loaders for loading a simple `.txt` file, for loading the text
|
||||
contents of any web page, or even for loading a transcript of a YouTube video.
|
||||
|
||||
Document loaders expose a "load" method for loading data as documents from a configured source. They optionally
|
||||
Document loaders provide a "load" method for loading data as documents from a configured source. They optionally
|
||||
implement a "lazy load" as well for lazily loading data into memory.
|
||||
|
||||
## Get started
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
This is the simplest method. This splits based on characters (by default "\n\n") and measures chunk length by number of characters.
|
||||
|
||||
1. How the text is split: by single character
|
||||
2. How the chunk size is measured: by number of characters
|
||||
1. How the text is split: by single character.
|
||||
2. How the chunk size is measured: by number of characters.
|
||||
|
||||
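A minimal sketch of the behavior described in the two points above; the separator and chunk size are deliberately tiny so the splits are visible:

```python
from langchain.text_splitter import CharacterTextSplitter

text = "First paragraph.\n\nSecond paragraph.\n\nThird paragraph."

splitter = CharacterTextSplitter(
    separator="\n\n",   # split on blank lines
    chunk_size=20,      # measured in characters
    chunk_overlap=0,
)
# Three separate chunks, since no two paragraphs fit within 20 characters together.
print(splitter.split_text(text))
```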
import Example from "@snippets/modules/data_connection/document_transformers/text_splitters/character_text_splitter.mdx"
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Split code
|
||||
|
||||
CodeTextSplitter allows you to split your code with multiple language support. Import enum `Language` and specify the language.
|
||||
CodeTextSplitter allows you to split your code with multiple languages supported. Import enum `Language` and specify the language.
|
||||
|
||||
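For instance, a rough sketch of splitting Python source; this uses `RecursiveCharacterTextSplitter.from_language` with the `Language` enum, and the chunk size is illustrative:

```python
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter

PYTHON_CODE = """
def hello_world():
    print("Hello, World!")

# Call the function
hello_world()
"""

python_splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.PYTHON, chunk_size=50, chunk_overlap=0
)
for doc in python_splitter.create_documents([PYTHON_CODE]):
    print(repr(doc.page_content))
```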
import Example from "@snippets/modules/data_connection/document_transformers/text_splitters/code_splitter.mdx"
|
||||
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
This text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is `["\n\n", "\n", " ", ""]`. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.
|
||||
|
||||
1. How the text is split: by list of characters
|
||||
2. How the chunk size is measured: by number of characters
|
||||
1. How the text is split: by list of characters.
|
||||
2. How the chunk size is measured: by number of characters.
|
||||
|
||||
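A minimal sketch of the two points above; the chunk size is deliberately small so the recursive fallback through the separator list is visible:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

text = "One paragraph about LangChain.\n\nAnother paragraph, with several sentences. And more words here."

splitter = RecursiveCharacterTextSplitter(
    chunk_size=40,                         # measured in characters
    chunk_overlap=5,
    separators=["\n\n", "\n", " ", ""],    # the default order
)
for chunk in splitter.split_text(text):
    print(repr(chunk))
```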
import Example from "@snippets/modules/data_connection/document_transformers/text_splitters/recursive_text_splitter.mdx"
|
||||
|
||||
|
||||
@@ -18,9 +18,9 @@ This encompasses several key modules.
|
||||
**[Document loaders](/docs/modules/data_connection/document_loaders/)**
|
||||
|
||||
Load documents from many different sources.
|
||||
LangChain provides over a 100 different document loaders as well as integrations with other major providers in the space,
|
||||
LangChain provides over 100 different document loaders as well as integrations with other major providers in the space,
|
||||
like AirByte and Unstructured.
|
||||
We provide integrations to load all types of documents (html, PDF, code) from all types of locations (private s3 buckets, public websites).
|
||||
We provide integrations to load all types of documents (HTML, PDF, code) from all types of locations (private s3 buckets, public websites).
|
||||
|
||||
**[Document transformers](/docs/modules/data_connection/document_transformers/)**
|
||||
|
||||
@@ -32,18 +32,18 @@ LangChain provides several different algorithms for doing this, as well as logic
|
||||
**[Text embedding models](/docs/modules/data_connection/text_embedding/)**
|
||||
|
||||
Another key part of retrieval has become creating embeddings for documents.
|
||||
Embeddings capture the semantic meaning of text, allowing you to quickly and
|
||||
Embeddings capture the semantic meaning of the text, allowing you to quickly and
|
||||
efficiently find other pieces of text that are similar.
|
||||
LangChain provides integrations with over 25 different embedding providers and methods,
|
||||
from open-source to proprietary API,
|
||||
allowing you to choose the one best suited for your needs.
|
||||
LangChain exposes a standard interface, allowing you to easily swap between models.
|
||||
LangChain provides a standard interface, allowing you to easily swap between models.
|
||||
|
||||
**[Vector stores](/docs/modules/data_connection/vectorstores/)**
|
||||
|
||||
With the rise of embeddings, there has emerged a need for databases to support efficient storage and searching of these embeddings.
|
||||
LangChain provides integrations with over 50 different vectorstores, from open-source local ones to cloud-hosted proprietary ones,
|
||||
allowing you choose the one best suited for your needs.
|
||||
allowing you to choose the one best suited for your needs.
|
||||
LangChain exposes a standard interface, allowing you to easily swap between vector stores.
|
||||
|
||||
**[Retrievers](/docs/modules/data_connection/retrievers/)**
|
||||
@@ -55,7 +55,7 @@ However, we have also added a collection of algorithms on top of this to increas
|
||||
These include:
|
||||
|
||||
- [Parent Document Retriever](/docs/modules/data_connection/retrievers/parent_document_retriever): This allows you to create multiple embeddings per parent document, so you can look up smaller chunks but return larger context.
|
||||
- [Self Query Retriever](/docs/modules/data_connection/retrievers/self_query): User questions often contain reference to something that isn't just semantic, but rather expresses some logic that can best be represented as a metadata filter. Self-query allows you to parse out the *semantic* part of a query from other *metadata filters* present in the query
|
||||
- [Self Query Retriever](/docs/modules/data_connection/retrievers/self_query): User questions often contain a reference to something that isn't just semantic but rather expresses some logic that can best be represented as a metadata filter. Self-query allows you to parse out the *semantic* part of a query from other *metadata filters* present in the query.
|
||||
- [Ensemble Retriever](/docs/modules/data_connection/retrievers/ensemble): Sometimes you may want to retrieve documents from multiple different sources, or using multiple different algorithms. The ensemble retriever allows you to easily do this.
|
||||
- And more!
|
||||
|
||||
|
||||
@@ -5,10 +5,10 @@ One challenge with retrieval is that usually you don't know the specific queries
|
||||
Contextual compression is meant to fix this. The idea is simple: instead of immediately returning retrieved documents as-is, you can compress them using the context of the given query, so that only the relevant information is returned. “Compressing” here refers to both compressing the contents of an individual document and filtering out documents wholesale.
|
||||
|
||||
To use the Contextual Compression Retriever, you'll need:
|
||||
- a base Retriever
|
||||
- a base retriever
|
||||
- a Document Compressor
|
||||
|
||||
The Contextual Compression Retriever passes queries to the base Retriever, takes the initial documents and passes them through the Document Compressor. The Document Compressor takes a list of Documents and shortens it by reducing the contents of Documents or dropping Documents altogether.
|
||||
The Contextual Compression Retriever passes queries to the base retriever, takes the initial documents and passes them through the Document Compressor. The Document Compressor takes a list of documents and shortens it by reducing the contents of documents or dropping documents altogether.
|
||||
|
||||

|
||||
|
||||
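A rough sketch of wiring these two pieces together, assuming an OpenAI API key and the `faiss-cpu` package are available; `LLMChainExtractor` is used here as the document compressor, and the sample documents are placeholders:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.schema import Document
from langchain.vectorstores import FAISS

docs = [
    Document(page_content="LangChain supports many vector stores, including FAISS."),
    Document(page_content="Bears like honey."),
]

base_retriever = FAISS.from_documents(docs, OpenAIEmbeddings()).as_retriever()
compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))

compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=base_retriever
)
# Only query-relevant snippets of the retrieved documents come back.
print(compression_retriever.get_relevant_documents("Which vector stores does LangChain support?"))
```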
|
||||
@@ -8,7 +8,7 @@ Head to [Integrations](/docs/integrations/retrievers/) for documentation on buil
|
||||
:::
|
||||
|
||||
A retriever is an interface that returns documents given an unstructured query. It is more general than a vector store.
|
||||
A retriever does not need to be able to store documents, only to return (or retrieve) it. Vector stores can be used
|
||||
A retriever does not need to be able to store documents, only to return (or retrieve) them. Vector stores can be used
|
||||
as the backbone of a retriever, but there are other types of retrievers as well.
|
||||
|
||||
## Get started
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Self-querying
|
||||
|
||||
A self-querying retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to it's underlying VectorStore. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documented, but to also extract filters from the user query on the metadata of stored documents and to execute those filters.
|
||||
A self-querying retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to its underlying VectorStore. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents but to also extract filters from the user query on the metadata of stored documents and to execute those filters.
|
||||
|
||||

|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ The algorithm for scoring them is:
|
||||
semantic_similarity + (1.0 - decay_rate) ^ hours_passed
|
||||
```
|
||||
|
||||
Notably, `hours_passed` refers to the hours passed since the object in the retriever **was last accessed**, not since it was created. This means that frequently accessed objects remain "fresh."
|
||||
Notably, `hours_passed` refers to the hours passed since the object in the retriever **was last accessed**, not since it was created. This means that frequently accessed objects remain "fresh".
|
||||
|
||||
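As a quick sanity check of the formula, with an illustrative decay rate of 0.05 and similarity of 0.8, an object touched an hour ago still carries most of its recency bonus, while one untouched for a day has lost almost all of it:

```python
semantic_similarity = 0.8
decay_rate = 0.05

def score(hours_passed: float) -> float:
    # semantic_similarity + (1.0 - decay_rate) ^ hours_passed
    return semantic_similarity + (1.0 - decay_rate) ** hours_passed

print(score(1))    # ~1.75: accessed an hour ago, still "fresh"
print(score(24))   # ~1.09: a day since last access, recency bonus mostly gone
```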
import Example from "@snippets/modules/data_connection/retrievers/how_to/time_weighted_vectorstore.mdx"
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# Vector store-backed retriever
|
||||
|
||||
A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the Vector Store class to make it conform to the Retriever interface.
|
||||
A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever interface.
|
||||
It uses the search methods implemented by a vector store, like similarity search and MMR, to query the texts in the vector store.
|
||||
|
||||
Once you construct a Vector store, it's very easy to construct a retriever. Let's walk through an example.
|
||||
Once you construct a vector store, it's very easy to construct a retriever. Let's walk through an example.
|
||||
|
||||
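A minimal sketch, assuming an OpenAI API key and the `faiss-cpu` package are available; the documents are placeholders:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import FAISS

docs = [
    Document(page_content="LangChain makes it easy to build LLM apps."),
    Document(page_content="FAISS is a library for similarity search."),
]

db = FAISS.from_documents(docs, OpenAIEmbeddings())

# as_retriever() wraps the vector store in the retriever interface;
# search_type / search_kwargs control how the underlying search is run.
retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 1})
print(retriever.get_relevant_documents("what is FAISS?"))
```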
import Example from "@snippets/modules/data_connection/retrievers/how_to/vectorstore.mdx"
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ The Embeddings class is a class designed for interfacing with text embedding mod
|
||||
|
||||
Embeddings create a vector representation of a piece of text. This is useful because it means we can think about text in the vector space, and do things like semantic search where we look for pieces of text that are most similar in the vector space.
|
||||
|
||||
The base Embeddings class in LangChain exposes two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
|
||||
The base Embeddings class in LangChain provides two methods: one for embedding documents and one for embedding a query. The former takes as input multiple texts, while the latter takes a single text. The reason for having these as two separate methods is that some embedding providers have different embedding methods for documents (to be searched over) vs queries (the search query itself).
|
||||
|
||||
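A minimal sketch of the two methods, using OpenAI embeddings as one example provider (an API key is assumed; any provider implementing the same interface works):

```python
from langchain.embeddings import OpenAIEmbeddings

embeddings_model = OpenAIEmbeddings()

# embed_documents: many texts in, one vector per text out.
doc_vectors = embeddings_model.embed_documents(["Hi there!", "Oh, hello!"])
print(len(doc_vectors), len(doc_vectors[0]))   # e.g. 2 vectors of 1536 floats

# embed_query: a single search query in, a single vector out.
query_vector = embeddings_model.embed_query("What was said to the newcomer?")
print(len(query_vector))
```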
## Get started
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ for you.
|
||||
|
||||
## Get started
|
||||
|
||||
This walkthrough showcases basic functionality related to VectorStores. A key part of working with vector stores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [text embedding model](/docs/modules/data_connection/text_embedding/) interfaces before diving into this.
|
||||
This walkthrough showcases basic functionality related to vector stores. A key part of working with vector stores is creating the vector to put in them, which is usually created via embeddings. Therefore, it is recommended that you familiarize yourself with the [text embedding model](/docs/modules/data_connection/text_embedding/) interfaces before diving into this.
|
||||
|
||||
import GetStarted from "@snippets/modules/data_connection/vectorstores/get_started.mdx"
|
||||
|
||||
|
||||
@@ -8,10 +8,10 @@ Head to [Integrations](/docs/integrations/memory/) for documentation on built-in
|
||||
:::
|
||||
|
||||
One of the core utility classes underpinning most (if not all) memory modules is the `ChatMessageHistory` class.
|
||||
This is a super lightweight wrapper which exposes convenience methods for saving Human messages, AI messages, and then fetching them all.
|
||||
This is a super lightweight wrapper which provides convenience methods for saving HumanMessages, AIMessages, and then fetching them all.
|
||||
|
||||
You may want to use this class directly if you are managing memory outside of a chain.
|
||||
|
||||
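A minimal sketch of using the class directly:

```python
from langchain.memory import ChatMessageHistory

history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("what's up?")

# .messages is just a list of HumanMessage / AIMessage objects.
print(history.messages)
```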
import GetStarted from "@snippets/modules/memory/chat_messages/get_started.mdx"
|
||||
|
||||
<GetStarted/>
|
||||
<GetStarted/>
|
||||
|
||||
@@ -32,7 +32,7 @@ Even if these are not all used directly, they need to be stored in some form.
|
||||
One of the key parts of the LangChain memory module is a series of integrations for storing these chat messages,
|
||||
from in-memory lists to persistent databases.
|
||||
|
||||
- [Chat message storage](/docs/modules/memory/chat_messages/): How to work with Chat Messages, and the various integrations offered
|
||||
- [Chat message storage](/docs/modules/memory/chat_messages/): How to work with Chat Messages, and the various integrations offered.
|
||||
|
||||
### Querying: Data structures and algorithms on top of chat messages
|
||||
Keeping a list of chat messages is fairly straightforward.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Conversation buffer memory
|
||||
# Conversation Buffer
|
||||
|
||||
This notebook shows how to use `ConversationBufferMemory`. This memory allows for storing messages and then extracting them into a variable.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Conversation buffer window memory
|
||||
# Conversation Buffer Window
|
||||
|
||||
`ConversationBufferWindowMemory` keeps a list of the interactions of the conversation over time. It only uses the last K interactions. This can be useful for keeping a sliding window of the most recent interactions, so the buffer does not get too large
|
||||
`ConversationBufferWindowMemory` keeps a list of the interactions of the conversation over time. It only uses the last K interactions. This can be useful for keeping a sliding window of the most recent interactions, so the buffer does not get too large.
|
||||
|
||||
Let's first explore the basic functionality of this type of memory.
|
||||
|
||||
|
||||
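A minimal sketch with `k=1`, so only the most recent exchange survives in the buffer:

```python
from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=1)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})

# Only the last interaction is kept.
print(memory.load_memory_variables({}))
```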
@@ -1,6 +1,6 @@
|
||||
# Entity memory
|
||||
# Entity
|
||||
|
||||
Entity Memory remembers given facts about specific entities in a conversation. It extracts information on entities (using an LLM) and builds up its knowledge about that entity over time (also using an LLM).
|
||||
Entity memory remembers given facts about specific entities in a conversation. It extracts information on entities (using an LLM) and builds up its knowledge about that entity over time (also using an LLM).
|
||||
|
||||
Let's first walk through using this functionality.
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
---
|
||||
# Memory Types
|
||||
# Memory types
|
||||
|
||||
There are many different types of memory.
|
||||
Each have their own parameters, their own return types, and are useful in different scenarios.
|
||||
Each has its own parameters and return types, and is useful in different scenarios.
|
||||
Please see their individual page for more detail on each one.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Conversation summary memory
|
||||
# Conversation Summary
|
||||
Now let's take a look at using a slightly more complex type of memory - `ConversationSummaryMemory`. This type of memory creates a summary of the conversation over time. This can be useful for condensing information from the conversation over time.
|
||||
Conversation summary memory summarizes the conversation as it happens and stores the current summary in memory. This memory can then be used to inject the summary of the conversation so far into a prompt/chain. This memory is most useful for longer conversations, where keeping the past message history in the prompt verbatim would take up too many tokens.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Vector store-backed memory
|
||||
# Backed by a Vector Store
|
||||
|
||||
`VectorStoreRetrieverMemory` stores memories in a VectorDB and queries the top-K most "salient" docs every time it is called.
|
||||
`VectorStoreRetrieverMemory` stores memories in a vector store and queries the top-K most "salient" docs every time it is called.
|
||||
|
||||
This differs from most of the other Memory classes in that it doesn't explicitly track the order of interactions.
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Caching
|
||||
LangChain provides an optional caching layer for Chat Models. This is useful for two reasons:
|
||||
LangChain provides an optional caching layer for chat models. This is useful for two reasons:
|
||||
|
||||
It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times.
|
||||
It can speed up your application by reducing the number of API calls you make to the LLM provider.
|
||||
|
||||
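A minimal sketch of turning on the in-memory cache (the same pattern applies to the other cache backends), assuming an OpenAI API key is configured:

```python
import langchain
from langchain.cache import InMemoryCache
from langchain.chat_models import ChatOpenAI

langchain.llm_cache = InMemoryCache()

llm = ChatOpenAI()
print(llm.predict("Tell me a joke"))   # first call hits the API
print(llm.predict("Tell me a joke"))   # identical repeat call is served from the cache
```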
@@ -8,8 +8,8 @@ Head to [Integrations](/docs/integrations/chat/) for documentation on built-in i
|
||||
:::
|
||||
|
||||
Chat models are a variation on language models.
|
||||
While chat models use language models under the hood, the interface they expose is a bit different.
|
||||
Rather than expose a "text in, text out" API, they expose an interface where "chat messages" are the inputs and outputs.
|
||||
While chat models use language models under the hood, the interface they use is a bit different.
|
||||
Rather than using a "text in, text out" API, they use an interface where "chat messages" are the inputs and outputs.
|
||||
|
||||
Chat model APIs are fairly new, so we are still figuring out the correct abstractions.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Prompts
|
||||
|
||||
Prompts for Chat models are built around messages, instead of just plain text.
|
||||
Prompts for chat models are built around messages, instead of just plain text.
|
||||
|
||||
import Prompts from "@snippets/modules/model_io/models/chat/how_to/prompts.mdx"
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Streaming
|
||||
|
||||
Some Chat models provide a streaming response. This means that instead of waiting for the entire response to be returned, you can start processing it as soon as it's available. This is useful if you want to display the response to the user as it's being generated, or if you want to process the response as it's being generated.
|
||||
Some chat models provide a streaming response. This means that instead of waiting for the entire response to be returned, you can start processing it as soon as it's available. This is useful if you want to display the response to the user as it's being generated, or if you want to process the response as it's being generated.
|
||||
|
||||
import StreamingChatModel from "@snippets/modules/model_io/models/chat/how_to/streaming.mdx"
|
||||
|
||||
|
||||
@@ -8,16 +8,16 @@ LangChain provides interfaces and integrations for two types of models:
|
||||
- [LLMs](/docs/modules/model_io/models/llms/): Models that take a text string as input and return a text string
|
||||
- [Chat models](/docs/modules/model_io/models/chat/): Models that are backed by a language model but take a list of Chat Messages as input and return a Chat Message
|
||||
|
||||
## LLMs vs Chat Models
|
||||
## LLMs vs chat models
|
||||
|
||||
LLMs and Chat Models are subtly but importantly different. LLMs in LangChain refer to pure text completion models.
|
||||
LLMs and chat models are subtly but importantly different. LLMs in LangChain refer to pure text completion models.
|
||||
The APIs they wrap take a string prompt as input and output a string completion. OpenAI's GPT-3 is implemented as an LLM.
|
||||
Chat models are often backed by LLMs but tuned specifically for having conversations.
|
||||
And, crucially, their provider APIs expose a different interface than pure text completion models. Instead of a single string,
|
||||
And, crucially, their provider APIs use a different interface than pure text completion models. Instead of a single string,
|
||||
they take a list of chat messages as input. Usually these messages are labeled with the speaker (usually one of "System",
|
||||
"AI", and "Human"). And they return a ("AI") chat message as output. GPT-4 and Anthropic's Claude are both implemented as Chat Models.
|
||||
"AI", and "Human"). And they return an AI chat message as output. GPT-4 and Anthropic's Claude are both implemented as chat models.
|
||||
|
||||
To make it possible to swap LLMs and Chat Models, both implement the Base Language Model interface. This exposes common
|
||||
To make it possible to swap LLMs and chat models, both implement the Base Language Model interface. This includes common
|
||||
methods "predict", which takes a string and returns a string, and "predict messages", which takes messages and returns a message.
|
||||
If you are using a specific model it's recommended you use the methods specific to that model class (i.e., "predict" for LLMs and "predict messages" for Chat Models),
|
||||
If you are using a specific model it's recommended you use the methods specific to that model class (i.e., "predict" for LLMs and "predict messages" for chat models),
|
||||
but if you're creating an application that should work with different types of models the shared interface can be helpful.
|
||||
|
||||
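A minimal sketch of that shared interface, assuming an OpenAI API key is configured:

```python
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.schema import HumanMessage

llm = OpenAI()             # text in, text out
chat_model = ChatOpenAI()  # messages in, message out

text = "What would be a good company name for a company that makes colorful socks?"
messages = [HumanMessage(content=text)]

# Both model types implement both methods, so they can be swapped.
print(llm.predict(text))                      # -> a plain string
print(chat_model.predict(text))               # -> a plain string
print(llm.predict_messages(messages))         # -> an AIMessage
print(chat_model.predict_messages(messages))  # -> an AIMessage
```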
@@ -1,6 +1,6 @@
|
||||
# Few-shot prompt templates
|
||||
|
||||
In this tutorial, we'll learn how to create a prompt template that uses few shot examples. A few shot prompt template can be constructed from either a set of examples, or from an Example Selector object.
|
||||
In this tutorial, we'll learn how to create a prompt template that uses few-shot examples. A few-shot prompt template can be constructed from either a set of examples, or from an Example Selector object.
|
||||
|
||||
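A minimal sketch of the fixed-examples variant (the Example Selector variant swaps the `examples` list for a selector); the examples themselves are placeholders:

```python
from langchain.prompts import FewShotPromptTemplate, PromptTemplate

examples = [
    {"word": "happy", "antonym": "sad"},
    {"word": "tall", "antonym": "short"},
]

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)

few_shot_prompt = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Word: {input}\nAntonym:",
    input_variables=["input"],
)

print(few_shot_prompt.format(input="big"))
```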
import Example from "@snippets/modules/model_io/prompts/prompt_templates/few_shot_examples.mdx"
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ sidebar_position: 0
|
||||
|
||||
Prompt templates are pre-defined recipes for generating prompts for language models.
|
||||
|
||||
A template may include instructions, few shot examples, and specific context and
|
||||
A template may include instructions, few-shot examples, and specific context and
|
||||
questions appropriate for a given task.
|
||||
|
||||
LangChain provides tooling to create and work with prompt templates.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Partial prompt templates
|
||||
|
||||
Like other methods, it can make sense to "partial" a prompt template - eg pass in a subset of the required values, as to create a new prompt template which expects only the remaining subset of values.
|
||||
Like other methods, it can make sense to "partial" a prompt template - e.g. pass in a subset of the required values, so as to create a new prompt template that expects only the remaining subset of values.
|
||||
|
||||
LangChain supports this in two ways:
|
||||
1. Partial formatting with string values.
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
This notebook goes over how to compose multiple prompts together. This can be useful when you want to reuse parts of prompts. This can be done with a PipelinePrompt. A PipelinePrompt consists of two main parts:
|
||||
|
||||
- Final prompt: This is the final prompt that is returned
|
||||
- Pipeline prompts: This is a list of tuples, consisting of a string name and a prompt template. Each prompt template will be formatted and then passed to future prompt templates as a variable with the same name.
|
||||
- Final prompt: The final prompt that is returned
|
||||
- Pipeline prompts: A list of tuples, consisting of a string name and a prompt template. Each prompt template will be formatted and then passed to future prompt templates as a variable with the same name.
|
||||
|
||||
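A minimal sketch of the two parts listed above; the prompt contents and variable names are purely illustrative:

```python
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate

# Final prompt: the template that is ultimately returned.
full_template = PromptTemplate.from_template("{introduction}\n\n{example}\n\n{start}")

# Pipeline prompts: each is formatted and passed forward under its name.
introduction_prompt = PromptTemplate.from_template("You are impersonating {person}.")
example_prompt = PromptTemplate.from_template("Q: {example_q}\nA: {example_a}")
start_prompt = PromptTemplate.from_template("Q: {input}\nA:")

pipeline_prompt = PipelinePromptTemplate(
    final_prompt=full_template,
    pipeline_prompts=[
        ("introduction", introduction_prompt),
        ("example", example_prompt),
        ("start", start_prompt),
    ],
)

print(pipeline_prompt.format(
    person="Elon Musk",
    example_q="What's your favorite car?",
    example_a="Tesla",
    input="What's your favorite social media site?",
))
```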
import Example from "@snippets/modules/model_io/prompts/prompt_templates/prompt_composition.mdx"
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ sidebar_position: 2
|
||||
# Store and reference chat history
|
||||
The ConversationalRetrievalQA chain builds on RetrievalQAChain to provide a chat history component.
|
||||
|
||||
It first combines the chat history (either explicitly passed in or retrieved from the provided memory) and the question into a standalone question, then looks up relevant documents from the retriever, and finally passes those documents and the question to a question answering chain to return a response.
|
||||
It first combines the chat history (either explicitly passed in or retrieved from the provided memory) and the question into a standalone question, then looks up relevant documents from the retriever, and finally passes those documents and the question to a question-answering chain to return a response.
|
||||
|
||||
To create one, you will need a retriever. In the below example, we will create one from a vector store, which can be created from embeddings.
|
||||
|
||||
|
||||
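A rough sketch of that wiring, assuming an OpenAI API key and the `faiss-cpu` package are available; the single document here is a stand-in for whatever you have loaded:

```python
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import FAISS

docs = [Document(page_content="LangChain was open-sourced in October 2022.")]
retriever = FAISS.from_documents(docs, OpenAIEmbeddings()).as_retriever()

qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0), retriever)

chat_history = []  # list of (question, answer) tuples, or provide a memory instead
result = qa({"question": "When was LangChain open-sourced?", "chat_history": chat_history})
print(result["answer"])
```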
@@ -6,4 +6,4 @@ sidebar_position: 3
|
||||
|
||||
Web scraping has historically been a challenging endeavor due to the ever-changing nature of website structures, making it tedious for developers to maintain their scraping scripts. Traditional methods often rely on specific HTML tags and patterns which, when altered, can disrupt data extraction processes.
|
||||
|
||||
Enter the LLM-based method for parsing HTML: By leveraging the capabilities of LLMs, and especially OpenAI Functions in LangChain's extraction chain, developers can instruct the model to extract only the desired data in a specified format. This method not only streamlines the extraction process but also significantly reduces the time spent on manual debugging and script modifications. Its adaptability means that even if websites undergo significant design changes, the extraction remains consistent and robust. This level of resilience translates to reduced maintenance efforts, cost savings, and ensures a higher quality of extracted data. Compared to its predecessors, LLM-based approach wins out the web scraping domain by transforming a historically cumbersome task into a more automated and efficient process.
|
||||
Enter the LLM-based method for parsing HTML: By leveraging the capabilities of LLMs, and especially OpenAI Functions in LangChain's extraction chain, developers can instruct the model to extract only the desired data in a specified format. This method not only streamlines the extraction process but also significantly reduces the time spent on manual debugging and script modifications. Its adaptability means that even if websites undergo significant design changes, the extraction remains consistent and robust. This level of resilience translates to reduced maintenance efforts, cost savings, and ensures a higher quality of extracted data. Compared to its predecessors, the LLM-based approach wins out in the web scraping domain by transforming a historically cumbersome task into a more automated and efficient process.
|
||||
|
||||
@@ -44,6 +44,16 @@ module.exports = {
|
||||
id: "modules/index"
|
||||
},
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "LangChain Expression Language",
|
||||
collapsed: true,
|
||||
items: [{ type: "autogenerated", dirName: "expression_language" } ],
|
||||
link: {
|
||||
type: 'doc',
|
||||
id: "expression_language/index"
|
||||
},
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "Guides",
|
||||
@@ -52,17 +62,7 @@ module.exports = {
|
||||
link: {
|
||||
type: 'generated-index',
|
||||
description: 'Design guides for key parts of the development process',
|
||||
slug: "guides",
|
||||
},
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "Ecosystem",
|
||||
collapsed: true,
|
||||
items: [{ type: "autogenerated", dirName: "ecosystem" }],
|
||||
link: {
|
||||
type: 'generated-index',
|
||||
slug: "ecosystem",
|
||||
slug: "guides",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -72,7 +72,7 @@ module.exports = {
|
||||
items: [{ type: "autogenerated", dirName: "additional_resources" }, { type: "link", label: "Gallery", href: "https://github.com/kyrolabs/awesome-langchain" }],
|
||||
link: {
|
||||
type: 'generated-index',
|
||||
slug: "additional_resources",
|
||||
slug: "additional_resources",
|
||||
},
|
||||
},
|
||||
'community'
|
||||
|
||||
@@ -24,8 +24,7 @@ function Imports({ imports }) {
|
||||
<li key={imported}>
|
||||
<a href={docs}>
|
||||
<span>{imported}</span>
|
||||
</a>{" "}
|
||||
from <code>{source}</code>
|
||||
</a>
|
||||
</li>
|
||||
))}
|
||||
</ul>
|
||||
|
||||
BIN docs/docs_skeleton/static/img/ReAct.png (new file, 42 KiB, binary file not shown)
BIN docs/docs_skeleton/static/img/agents_use_case_1.png (new file, 236 KiB, binary file not shown)
BIN docs/docs_skeleton/static/img/agents_use_case_trace_1.png (new file, 74 KiB, binary file not shown)
BIN docs/docs_skeleton/static/img/agents_use_case_trace_2.png (new file, 166 KiB, binary file not shown)
BIN docs/docs_skeleton/static/img/agents_vs_chains.png (new file, 42 KiB, binary file not shown)
BIN docs/docs_skeleton/static/img/oai_function_agent.png (new file, 177 KiB, binary file not shown)
@@ -1,5 +1,9 @@
|
||||
{
|
||||
"redirects": [
|
||||
{
|
||||
"source": "/docs/modules/data_connection/caching_embeddings(/?)",
|
||||
"destination": "/docs/modules/data_connection/text_embedding/caching_embeddings"
|
||||
},
|
||||
{
|
||||
"source": "/en/latest/additional_resources/youtube.html",
|
||||
"destination": "/docs/additional_resources/youtube"
|
||||
@@ -166,7 +170,7 @@
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/deeplake",
|
||||
"destination": "/docs/integrations/providers/deeplake"
|
||||
"destination": "/docs/integrations/providers/activeloop_deeplake"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/diffbot",
|
||||
@@ -2948,6 +2952,46 @@
|
||||
"source": "/docs/modules/model_io/models/llms/integrations/writer",
|
||||
"destination": "/docs/integrations/llms/writer"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/amazon_api_gateway_example",
|
||||
"destination": "/docs/integrations/llms/amazon_api_gateway"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/azureml_endpoint_example",
|
||||
"destination": "/docs/integrations/llms/azure_ml"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/azure_openai_example",
|
||||
"destination": "/docs/integrations/llms/azure_openai"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/cerebriumai_example",
|
||||
"destination": "/docs/integrations/llms/cerebriumai"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/deepinfra_example",
|
||||
"destination": "/docs/integrations/llms/deepinfra"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/Fireworks",
|
||||
"destination": "/docs/integrations/llms/fireworks"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/forefrontai_example",
|
||||
"destination": "/docs/integrations/llms/forefrontai"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/gooseai_example",
|
||||
"destination": "/docs/integrations/llms/gooseai"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/petals_example",
|
||||
"destination": "/docs/integrations/llms/petals"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/llms/pipelineai_example",
|
||||
"destination": "/docs/integrations/llms/pipelineai"
|
||||
},
|
||||
{
|
||||
"source": "/en/latest/modules/prompts.html",
|
||||
"destination": "/docs/modules/model_io/prompts"
|
||||
@@ -3432,6 +3476,14 @@
|
||||
"source": "/docs/modules/chains/additional/graph_kuzu_qa",
|
||||
"destination": "/docs/use_cases/more/graph/graph_kuzu_qa"
|
||||
},
|
||||
{
|
||||
"source": "/docs/use_cases/graph/graph_falkordb_qa",
|
||||
"destination": "/docs/use_cases/more/graph/graph_falkordb_qa"
|
||||
},
|
||||
{
|
||||
"source": "/docs/modules/chains/additional/graph_falkordb_qa",
|
||||
"destination": "/docs/use_cases/more/graph/graph_falkordb_qa"
|
||||
},
|
||||
{
|
||||
"source": "/docs/use_cases/graph/graph_nebula_qa",
|
||||
"destination": "/docs/use_cases/more/graph/graph_nebula_qa"
|
||||
@@ -3543,6 +3595,18 @@
|
||||
{
|
||||
"source": "/en/latest/integrations/:path*",
|
||||
"destination": "/docs/integrations/providers/:path*"
|
||||
},
|
||||
{
|
||||
"source": "/docs/guides/expression_language(/?)",
|
||||
"destination": "/docs/expression_language/"
|
||||
},
|
||||
{
|
||||
"source": "/docs/guides/expression_language/:path*",
|
||||
"destination": "/docs/expression_language/:path*"
|
||||
},
|
||||
{
|
||||
"source": "/docs/ecosystem/dependents",
|
||||
"destination": "/docs/additional_resources/dependents"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -47,7 +47,7 @@ from langchain.embeddings import integration_class_REPLACE_ME
|
||||
```
|
||||
|
||||
|
||||
## Chat Models
|
||||
## Chat models
|
||||
|
||||
See a [usage example](/docs/integrations/chat/INCLUDE_REAL_NAME)
|
||||
|
||||
|
||||
@@ -51,6 +51,7 @@ Dependents stats for `langchain-ai/langchain`
|
||||
|[e2b-dev/e2b](https://github.com/e2b-dev/e2b) | 5365 |
|
||||
|[mage-ai/mage-ai](https://github.com/mage-ai/mage-ai) | 5352 |
|
||||
|[wenda-LLM/wenda](https://github.com/wenda-LLM/wenda) | 5192 |
|
||||
|[liaokongVFX/LangChain-Chinese-Getting-Started-Guide](https://github.com/liaokongVFX/LangChain-Chinese-Getting-Started-Guide) | 5129 |
|
||||
|[zilliztech/GPTCache](https://github.com/zilliztech/GPTCache) | 4993 |
|
||||
|[GreyDGL/PentestGPT](https://github.com/GreyDGL/PentestGPT) | 4831 |
|
||||
|[zauberzeug/nicegui](https://github.com/zauberzeug/nicegui) | 4824 |
|
||||
@@ -1318,7 +1318,7 @@
|
||||
"source": [
|
||||
"template = \"\"\"Write some python code to solve the user's problem. \n",
|
||||
"\n",
|
||||
"Return only python code in Markdown format, eg:\n",
|
||||
"Return only python code in Markdown format, e.g.:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"....\n",
|
||||
@@ -62,7 +62,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 3,
|
||||
"id": "d1850a1f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -72,7 +72,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 4,
|
||||
"id": "56d0669f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -170,6 +170,36 @@
|
||||
"chain.batch([{\"topic\": \"bears\"}, {\"topic\": \"cats\"}])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2434ab15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can set the number of concurrent requests by using the `max_concurrency` parameter"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "a08522f6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Why don't bears wear shoes?\\n\\nBecause they have bear feet!\", additional_kwargs={}, example=False),\n",
|
||||
" AIMessage(content=\"Why don't cats play poker in the wild?\\n\\nToo many cheetahs!\", additional_kwargs={}, example=False)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.batch([{\"topic\": \"bears\"}, {\"topic\": \"cats\"}], config={\"max_concurrency\": 5})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b960cbfe",
|
||||
@@ -8,7 +8,7 @@ Here's a few different tools and functionalities to aid in debugging.
|
||||
|
||||
## Tracing
|
||||
|
||||
Platforms with tracing capabilities like [LangSmith](/docs/guides/langsmith/) and [WandB](/docs/ecosystem/integrations/agent_with_wandb_tracing) are the most comprehensive solutions for debugging. These platforms make it easy to not only log and visualize LLM apps, but also to actively debug, test and refine them.
|
||||
Platforms with tracing capabilities like [LangSmith](/docs/guides/langsmith/) and [WandB](/docs/integrations/providers/wandb_tracing) are the most comprehensive solutions for debugging. These platforms make it easy to not only log and visualize LLM apps, but also to actively debug, test and refine them.
|
||||
|
||||
For anyone building production-grade LLM applications, we highly recommend using a platform like this.
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ It also contains instructions for how to deploy this app on the Streamlit platfo
|
||||
|
||||
## [Gradio (on Hugging Face)](https://github.com/hwchase17/langchain-gradio-template)
|
||||
|
||||
This repo serves as a template for how deploy a LangChain with Gradio.
|
||||
This repo serves as a template for how to deploy a LangChain app with Gradio.
|
||||
It implements a chatbot interface, with a "Bring-Your-Own-Token" approach (nice for not racking up big bills).
|
||||
It also contains instructions for how to deploy this app on the Hugging Face platform.
|
||||
This is heavily influenced by James Weaver's [excellent examples](https://huggingface.co/JavaFXpert).
|
||||
@@ -27,7 +27,7 @@ Chainlit [doc](https://docs.chainlit.io/langchain) on the integration with LangC
|
||||
|
||||
## [Beam](https://github.com/slai-labs/get-beam/tree/main/examples/langchain-question-answering)
|
||||
|
||||
This repo serves as a template for how deploy a LangChain with [Beam](https://beam.cloud).
|
||||
This repo serves as a template for how to deploy a LangChain app with [Beam](https://beam.cloud).
|
||||
|
||||
It implements a Question Answering app and contains instructions for deploying the app as a serverless REST API.
|
||||
|
||||
@@ -49,7 +49,7 @@ A minimal example of how to deploy LangChain to [Fly.io](https://fly.io/) using
|
||||
|
||||
## [Digitalocean App Platform](https://github.com/homanp/digitalocean-langchain)
|
||||
|
||||
A minimal example on how to deploy LangChain to DigitalOcean App Platform.
|
||||
A minimal example of how to deploy LangChain to DigitalOcean App Platform.
|
||||
|
||||
## [CI/CD Google Cloud Build + Dockerfile + Serverless Google Cloud Run](https://github.com/g-emarco/github-assistant)
|
||||
|
||||
@@ -57,7 +57,7 @@ Boilerplate LangChain project on how to deploy to Google Cloud Run using Docker
|
||||
|
||||
## [Google Cloud Run](https://github.com/homanp/gcp-langchain)
|
||||
|
||||
A minimal example on how to deploy LangChain to Google Cloud Run.
|
||||
A minimal example of how to deploy LangChain to Google Cloud Run.
|
||||
|
||||
## [SteamShip](https://github.com/steamship-core/steamship-langchain/)
|
||||
|
||||
@@ -79,3 +79,7 @@ See OpenLLM's [integration doc](https://github.com/bentoml/OpenLLM#%EF%B8%8F-int
|
||||
## [Databutton](https://databutton.com/home?new-data-app=true)
|
||||
|
||||
These templates serve as examples of how to build, deploy, and share LangChain applications using Databutton. You can create user interfaces with Streamlit, automate tasks by scheduling Python code, and store files and data in the built-in store. Examples include a Chatbot interface with conversational memory, a Personal search engine, and a starter template for LangChain apps. Deploying and sharing is just one click away.
|
||||
|
||||
## [AzureML Online Endpoint](https://github.com/Azure/azureml-examples/blob/main/sdk/python/endpoints/online/llm/langchain/1_langchain_basic_deploy.ipynb)
|
||||
|
||||
A minimal example of how to deploy LangChain to an Azure Machine Learning Online Endpoint.
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"id": "b8982428",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Private, local, open source LLMs\n",
|
||||
"# Run LLMs locally\n",
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
"\n",
|
||||
@@ -146,7 +146,7 @@
|
||||
"source": [
|
||||
"## Environment\n",
|
||||
"\n",
|
||||
"Inference speed is a chllenge when running models locally (see above).\n",
|
||||
"Inference speed is a challenge when running models locally (see above).\n",
|
||||
"\n",
|
||||
"To minimize latency, it is desiable to run models locally on GPU, which ships with many consumer laptops [e.g., Apple devices](https://www.apple.com/newsroom/2022/06/apple-unveils-m2-with-breakthrough-performance-and-capabilities/).\n",
|
||||
"\n",
|
||||
@@ -264,88 +264,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pip install llama-cpp-python"
|
||||
"CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dirclear"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "9d5f94b5",
|
||||
"execution_count": null,
|
||||
"id": "a88bf0c8-e989-4bcd-bcb7-4d7757e684f2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"objc[10142]: Class GGMLMetalClass is implemented in both /Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/gpt4all/llmodel_DO_NOT_MODIFY/build/libreplit-mainline-metal.dylib (0x2a0c4c208) and /Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/llama_cpp/libllama.dylib (0x2c28bc208). One of the two will be used. Which one is undefined.\n",
|
||||
"llama.cpp: loading model from /Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\n",
|
||||
"llama_model_load_internal: format = ggjt v3 (latest)\n",
|
||||
"llama_model_load_internal: n_vocab = 32000\n",
|
||||
"llama_model_load_internal: n_ctx = 2048\n",
|
||||
"llama_model_load_internal: n_embd = 5120\n",
|
||||
"llama_model_load_internal: n_mult = 256\n",
|
||||
"llama_model_load_internal: n_head = 40\n",
|
||||
"llama_model_load_internal: n_layer = 40\n",
|
||||
"llama_model_load_internal: n_rot = 128\n",
|
||||
"llama_model_load_internal: freq_base = 10000.0\n",
|
||||
"llama_model_load_internal: freq_scale = 1\n",
|
||||
"llama_model_load_internal: ftype = 2 (mostly Q4_0)\n",
|
||||
"llama_model_load_internal: n_ff = 13824\n",
|
||||
"llama_model_load_internal: model size = 13B\n",
|
||||
"llama_model_load_internal: ggml ctx size = 0.09 MB\n",
|
||||
"llama_model_load_internal: mem required = 8953.71 MB (+ 1608.00 MB per state)\n",
|
||||
"llama_new_context_with_model: kv self size = 1600.00 MB\n",
|
||||
"ggml_metal_init: allocating\n",
|
||||
"ggml_metal_init: using MPS\n",
|
||||
"ggml_metal_init: loading '/Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/llama_cpp/ggml-metal.metal'\n",
|
||||
"ggml_metal_init: loaded kernel_add 0x47774af60\n",
|
||||
"ggml_metal_init: loaded kernel_mul 0x47774bc00\n",
|
||||
"ggml_metal_init: loaded kernel_mul_row 0x47774c230\n",
|
||||
"ggml_metal_init: loaded kernel_scale 0x47774c890\n",
|
||||
"ggml_metal_init: loaded kernel_silu 0x47774cef0\n",
|
||||
"ggml_metal_init: loaded kernel_relu 0x10e33e500\n",
|
||||
"ggml_metal_init: loaded kernel_gelu 0x47774b2f0\n",
|
||||
"ggml_metal_init: loaded kernel_soft_max 0x47771a580\n",
|
||||
"ggml_metal_init: loaded kernel_diag_mask_inf 0x47774dab0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_f16 0x47774e110\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_0 0x47774e7d0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_1 0x13efd7170\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q2_K 0x13efd73d0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q3_K 0x13efd7630\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_K 0x13efd7890\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q5_K 0x4744c9740\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q6_K 0x4744ca6b0\n",
|
||||
"ggml_metal_init: loaded kernel_rms_norm 0x4744cb250\n",
|
||||
"ggml_metal_init: loaded kernel_norm 0x4744cb970\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_f16_f32 0x10e33f700\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_0_f32 0x10e33fcd0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_1_f32 0x4744cc2d0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q2_K_f32 0x4744cc6f0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q3_K_f32 0x4744cd6b0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_K_f32 0x4744cde20\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q5_K_f32 0x10e33ff30\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q6_K_f32 0x10e340190\n",
|
||||
"ggml_metal_init: loaded kernel_rope 0x10e3403f0\n",
|
||||
"ggml_metal_init: loaded kernel_alibi_f32 0x10e340de0\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f32_f16 0x10e3416d0\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f32_f32 0x10e342080\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f16_f16 0x10e342ca0\n",
|
||||
"ggml_metal_init: recommendedMaxWorkingSetSize = 21845.34 MB\n",
|
||||
"ggml_metal_init: hasUnifiedMemory = true\n",
|
||||
"ggml_metal_init: maxTransferRate = built-in GPU\n",
|
||||
"ggml_metal_add_buffer: allocated 'data ' buffer, size = 6984.06 MB, ( 6986.19 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'eval ' buffer, size = 1032.00 MB, ( 8018.19 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'kv ' buffer, size = 1602.00 MB, ( 9620.19 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'scr0 ' buffer, size = 426.00 MB, (10046.19 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'scr1 ' buffer, size = 512.00 MB, (10558.19 / 21845.34)\n",
|
||||
"AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import LlamaCpp\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=\"/Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\",\n",
|
||||
" model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\",\n",
|
||||
" n_gpu_layers=1,\n",
|
||||
" n_batch=512,\n",
|
||||
" n_ctx=2048,\n",
|
||||
@@ -448,87 +379,10 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "b55a2147",
|
||||
"execution_count": null,
|
||||
"id": "915ecd4c-8f6b-4de3-a787-b64cb7c682b4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found model file at /Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\n",
|
||||
"llama_new_context_with_model: max tensor size = 87.89 MB\n",
|
||||
"llama_new_context_with_model: max tensor size = 87.89 MB\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"llama.cpp: using Metal\n",
|
||||
"llama.cpp: loading model from /Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\n",
|
||||
"llama_model_load_internal: format = ggjt v3 (latest)\n",
|
||||
"llama_model_load_internal: n_vocab = 32001\n",
|
||||
"llama_model_load_internal: n_ctx = 2048\n",
|
||||
"llama_model_load_internal: n_embd = 5120\n",
|
||||
"llama_model_load_internal: n_mult = 256\n",
|
||||
"llama_model_load_internal: n_head = 40\n",
|
||||
"llama_model_load_internal: n_layer = 40\n",
|
||||
"llama_model_load_internal: n_rot = 128\n",
|
||||
"llama_model_load_internal: ftype = 2 (mostly Q4_0)\n",
|
||||
"llama_model_load_internal: n_ff = 13824\n",
|
||||
"llama_model_load_internal: n_parts = 1\n",
|
||||
"llama_model_load_internal: model size = 13B\n",
|
||||
"llama_model_load_internal: ggml ctx size = 0.09 MB\n",
|
||||
"llama_model_load_internal: mem required = 9031.71 MB (+ 1608.00 MB per state)\n",
|
||||
"llama_new_context_with_model: kv self size = 1600.00 MB\n",
|
||||
"ggml_metal_init: allocating\n",
|
||||
"ggml_metal_init: using MPS\n",
|
||||
"ggml_metal_init: loading '/Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/gpt4all/llmodel_DO_NOT_MODIFY/build/ggml-metal.metal'\n",
|
||||
"ggml_metal_init: loaded kernel_add 0x37944d850\n",
|
||||
"ggml_metal_init: loaded kernel_mul 0x37944f350\n",
|
||||
"ggml_metal_init: loaded kernel_mul_row 0x37944fdd0\n",
|
||||
"ggml_metal_init: loaded kernel_scale 0x3794505a0\n",
|
||||
"ggml_metal_init: loaded kernel_silu 0x379450800\n",
|
||||
"ggml_metal_init: loaded kernel_relu 0x379450a60\n",
|
||||
"ggml_metal_init: loaded kernel_gelu 0x379450cc0\n",
|
||||
"ggml_metal_init: loaded kernel_soft_max 0x379450ff0\n",
|
||||
"ggml_metal_init: loaded kernel_diag_mask_inf 0x379451250\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_f16 0x3794514b0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_0 0x379451710\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_1 0x379451970\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q2_k 0x379451bd0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q3_k 0x379451e30\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_k 0x379452090\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q5_k 0x3794522f0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q6_k 0x379452550\n",
|
||||
"ggml_metal_init: loaded kernel_rms_norm 0x3794527b0\n",
|
||||
"ggml_metal_init: loaded kernel_norm 0x379452a10\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_f16_f32 0x379452c70\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_0_f32 0x379452ed0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_1_f32 0x379453130\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q2_k_f32 0x379453390\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q3_k_f32 0x3794535f0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_k_f32 0x379453850\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q5_k_f32 0x379453ab0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q6_k_f32 0x379453d10\n",
|
||||
"ggml_metal_init: loaded kernel_rope 0x379453f70\n",
|
||||
"ggml_metal_init: loaded kernel_alibi_f32 0x3794541d0\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f32_f16 0x379454430\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f32_f32 0x379454690\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f16_f16 0x3794548f0\n",
|
||||
"ggml_metal_init: recommendedMaxWorkingSetSize = 21845.34 MB\n",
|
||||
"ggml_metal_init: hasUnifiedMemory = true\n",
|
||||
"ggml_metal_init: maxTransferRate = built-in GPU\n",
|
||||
"ggml_metal_add_buffer: allocated 'data ' buffer, size = 6984.06 MB, (17542.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'eval ' buffer, size = 1024.00 MB, (18566.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'kv ' buffer, size = 1602.00 MB, (20168.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'scr0 ' buffer, size = 512.00 MB, (20680.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'scr1 ' buffer, size = 512.00 MB, (21192.94 / 21845.34)\n",
|
||||
"ggml_metal_free: deallocating\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import GPT4All\n",
|
||||
"llm = GPT4All(model=\"/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\")"
|
||||
@@ -564,89 +418,21 @@
|
||||
"\n",
|
||||
"Some LLMs will benefit from specific prompts.\n",
|
||||
"\n",
|
||||
"For example, llama2 can use [special tokens](https://twitter.com/RLanceMartin/status/1681879318493003776?s=20).\n",
|
||||
"For example, LLaMA will use [special tokens](https://twitter.com/RLanceMartin/status/1681879318493003776?s=20).\n",
|
||||
"\n",
|
||||
"We can use `ConditionalPromptSelector` to set prompt based on the model type."
|
||||
]
|
||||
},
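A rough sketch of such a selector (the prompt wording below is illustrative, not the notebook's exact prompts); one conditional can simply key off the model class:

```python
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.llms import LlamaCpp
from langchain.prompts import PromptTemplate

# Plain prompt for models that don't need special tokens (illustrative wording).
DEFAULT_PROMPT = PromptTemplate.from_template(
    "Answer the following question concisely: {question}"
)

# LLaMA-style prompt wrapped in the [INST]/<<SYS>> special tokens (assumed format).
LLAMA_PROMPT = PromptTemplate.from_template(
    "[INST] <<SYS>> You are a helpful assistant. <</SYS>> {question} [/INST]"
)

# Use the LLaMA prompt whenever the LLM is a LlamaCpp instance, else the default.
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
    default_prompt=DEFAULT_PROMPT,
    conditionals=[(lambda llm: isinstance(llm, LlamaCpp), LLAMA_PROMPT)],
)

prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)  # `llm` set up as in the cells above
```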
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "d082b10a",
|
||||
"execution_count": null,
|
||||
"id": "16759b7c-7903-4269-b7b4-f83b313d8091",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"llama.cpp: loading model from /Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\n",
|
||||
"llama_model_load_internal: format = ggjt v3 (latest)\n",
|
||||
"llama_model_load_internal: n_vocab = 32000\n",
|
||||
"llama_model_load_internal: n_ctx = 2048\n",
|
||||
"llama_model_load_internal: n_embd = 5120\n",
|
||||
"llama_model_load_internal: n_mult = 256\n",
|
||||
"llama_model_load_internal: n_head = 40\n",
|
||||
"llama_model_load_internal: n_layer = 40\n",
|
||||
"llama_model_load_internal: n_rot = 128\n",
|
||||
"llama_model_load_internal: freq_base = 10000.0\n",
|
||||
"llama_model_load_internal: freq_scale = 1\n",
|
||||
"llama_model_load_internal: ftype = 2 (mostly Q4_0)\n",
|
||||
"llama_model_load_internal: n_ff = 13824\n",
|
||||
"llama_model_load_internal: model size = 13B\n",
|
||||
"llama_model_load_internal: ggml ctx size = 0.09 MB\n",
|
||||
"llama_model_load_internal: mem required = 8953.71 MB (+ 1608.00 MB per state)\n",
|
||||
"llama_new_context_with_model: kv self size = 1600.00 MB\n",
|
||||
"ggml_metal_init: allocating\n",
|
||||
"ggml_metal_init: using MPS\n",
|
||||
"ggml_metal_init: loading '/Users/rlm/miniforge3/envs/llama/lib/python3.9/site-packages/llama_cpp/ggml-metal.metal'\n",
|
||||
"ggml_metal_init: loaded kernel_add 0x4744d09d0\n",
|
||||
"ggml_metal_init: loaded kernel_mul 0x3781cb3d0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_row 0x37813bb60\n",
|
||||
"ggml_metal_init: loaded kernel_scale 0x474481080\n",
|
||||
"ggml_metal_init: loaded kernel_silu 0x4744d29f0\n",
|
||||
"ggml_metal_init: loaded kernel_relu 0x3781254c0\n",
|
||||
"ggml_metal_init: loaded kernel_gelu 0x47447f280\n",
|
||||
"ggml_metal_init: loaded kernel_soft_max 0x4744cf470\n",
|
||||
"ggml_metal_init: loaded kernel_diag_mask_inf 0x4744cf6d0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_f16 0x4744cf930\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_0 0x4744cfb90\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_1 0x4744cfdf0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q2_K 0x4744d0050\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q3_K 0x4744ce980\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q4_K 0x4744cebe0\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q5_K 0x4744cee40\n",
|
||||
"ggml_metal_init: loaded kernel_get_rows_q6_K 0x4744cf0a0\n",
|
||||
"ggml_metal_init: loaded kernel_rms_norm 0x474482450\n",
|
||||
"ggml_metal_init: loaded kernel_norm 0x4744826b0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_f16_f32 0x474482910\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_0_f32 0x474482b70\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_1_f32 0x474482dd0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q2_K_f32 0x474483030\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q3_K_f32 0x474483290\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q4_K_f32 0x4744834f0\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q5_K_f32 0x474483750\n",
|
||||
"ggml_metal_init: loaded kernel_mul_mat_q6_K_f32 0x4744839b0\n",
|
||||
"ggml_metal_init: loaded kernel_rope 0x474483c10\n",
|
||||
"ggml_metal_init: loaded kernel_alibi_f32 0x474483e70\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f32_f16 0x4744840d0\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f32_f32 0x474484330\n",
|
||||
"ggml_metal_init: loaded kernel_cpy_f16_f16 0x474484590\n",
|
||||
"ggml_metal_init: recommendedMaxWorkingSetSize = 21845.34 MB\n",
|
||||
"ggml_metal_init: hasUnifiedMemory = true\n",
|
||||
"ggml_metal_init: maxTransferRate = built-in GPU\n",
|
||||
"ggml_metal_add_buffer: allocated 'data ' buffer, size = 6984.06 MB, ( 6986.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'eval ' buffer, size = 1032.00 MB, ( 8018.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'kv ' buffer, size = 1602.00 MB, ( 9620.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'scr0 ' buffer, size = 426.00 MB, (10046.94 / 21845.34)\n",
|
||||
"ggml_metal_add_buffer: allocated 'scr1 ' buffer, size = 512.00 MB, (10558.94 / 21845.34)\n",
|
||||
"AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | \n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set our LLM\n",
|
||||
"llm = LlamaCpp(\n",
|
||||
" model_path=\"/Users/rlm/Desktop/Code/llama.cpp/llama-2-13b-chat.ggmlv3.q4_0.bin\",\n",
|
||||
" model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\",\n",
|
||||
" n_gpu_layers=1,\n",
|
||||
" n_batch=512,\n",
|
||||
" n_ctx=2048,\n",
|
||||
@@ -661,7 +447,7 @@
|
||||
"id": "66656084",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Set the associated prompt."
|
||||
"Set the associated prompt based upon the model version."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -759,6 +545,18 @@
|
||||
"llm_chain.run({\"question\":question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6e0d37e7-f1d9-4848-bf2c-c22392ee141f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We also can use the LangChain Prompt Hub to fetch and / or store prompts that are model specific.\n",
|
||||
"\n",
|
||||
"This will work with your [LangSmith API key](https://docs.smith.langchain.com/).\n",
|
||||
"\n",
|
||||
"For example, [here](https://smith.langchain.com/hub/rlm/rag-prompt-llama) is a prompt for RAG with LLaMA-specific tokens."
|
||||
]
|
||||
},
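A minimal sketch of pulling that prompt (assuming the `langchainhub` package is installed and a LangSmith API key is configured):

```python
# pip install langchainhub
from langchain import hub

# Fetch the LLaMA-specific RAG prompt referenced above.
rag_prompt_llama = hub.pull("rlm/rag-prompt-llama")
print(rag_prompt_llama)
```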
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6ba66260",
|
||||
@@ -770,16 +568,12 @@
|
||||
"\n",
|
||||
"For example, here is a guide to [RAG](docs/use_cases/question_answering/how_to/local_retrieval_qa) with local LLMs.\n",
|
||||
"\n",
|
||||
"In general, use cases for local model can be driven by at least two factors:\n",
|
||||
"In general, use cases for local LLMs can be driven by at least two factors:\n",
|
||||
"\n",
|
||||
"* `Privacy`: private data (e.g., journals, etc) that a user does not want to share \n",
|
||||
"* `Cost`: text preprocessing (extraction/tagging), summarization, and agent simulations are token-use-intensive tasks\n",
|
||||
"\n",
|
||||
"There are a few approach to support specific use-cases: \n",
|
||||
"\n",
|
||||
"* Fine-tuning (e.g., [gpt-llm-trainer](https://github.com/mshumer/gpt-llm-trainer), [Anyscale](https://www.anyscale.com/blog/fine-tuning-llama-2-a-comprehensive-case-study-for-tailoring-models-to-unique-applications)) \n",
|
||||
"* [Function-calling](https://github.com/MeetKai/functionary/tree/main) for use-cases like extraction or tagging\n",
|
||||
"\n"
|
||||
"In addition, [here](https://blog.langchain.dev/using-langsmith-to-support-fine-tuning-of-open-source-llms/) is an overview on fine-tuning, which can utilize open source LLMs."
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -799,7 +593,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
1
docs/extras/guides/privacy/_category_.yml
Normal file
@@ -0,0 +1 @@
|
||||
label: 'Privacy'
|
||||
451
docs/extras/guides/privacy/presidio_data_anonymization.ipynb
Normal file
@@ -0,0 +1,451 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Data anonymization with Microsoft Presidio\n",
|
||||
"\n",
|
||||
"[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/guides/privacy/presidio_data_anonymization.ipynb)\n",
|
||||
"\n",
|
||||
"## Use case\n",
|
||||
"\n",
|
||||
"Data anonymization is crucial before passing information to a language model like GPT-4 because it helps protect privacy and maintain confidentiality. If data is not anonymized, sensitive information such as names, addresses, contact numbers, or other identifiers linked to specific individuals could potentially be learned and misused. Hence, by obscuring or removing this personally identifiable information (PII), data can be used freely without compromising individuals' privacy rights or breaching data protection laws and regulations.\n",
|
||||
"\n",
|
||||
"## Overview\n",
|
||||
"\n",
|
||||
"Anonynization consists of two steps:\n",
|
||||
"\n",
|
||||
"1. **Identification:** Identify all data fields that contain personally identifiable information (PII).\n",
|
||||
"2. **Replacement**: Replace all PIIs with pseudo values or codes that do not reveal any personal information about the individual but can be used for reference. We're not using regular encryption, because the language model won't be able to understand the meaning or context of the encrypted data.\n",
|
||||
"\n",
|
||||
"We use *Microsoft Presidio* together with *Faker* framework for anonymization purposes because of the wide range of functionalities they provide. The full implementation is available in `PresidioAnonymizer`.\n",
|
||||
"\n",
|
||||
"## Quickstart\n",
|
||||
"\n",
|
||||
"Below you will find the use case on how to leverage anonymization in LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Install necessary packages\n",
|
||||
"# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n",
|
||||
"# ! python -m spacy download en_core_web_lg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"Let's see how PII anonymization works using a sample sentence:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'My name is Mrs. Rachel Chen DDS, call me at 849-829-7628x073 or email me at christopherfrey@example.org'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_experimental.data_anonymizer import PresidioAnonymizer\n",
|
||||
"\n",
|
||||
"anonymizer = PresidioAnonymizer()\n",
|
||||
"\n",
|
||||
"anonymizer.anonymize(\n",
|
||||
" \"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Using with LangChain Expression Language\n",
|
||||
"\n",
|
||||
"With LCEL we can easily chain together anonymization with the rest of our application."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set env var OPENAI_API_KEY or load from a .env file:\n",
|
||||
"# import dotenv\n",
|
||||
"\n",
|
||||
"# dotenv.load_dotenv()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content='You can find our super secret data at https://www.ross.com/', additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"template = \"\"\"According to this text, where can you find our super secret data?\n",
|
||||
"\n",
|
||||
"{anonymized_text}\n",
|
||||
"\n",
|
||||
"Answer:\"\"\"\n",
|
||||
"prompt = PromptTemplate.from_template(template)\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"chain = {\"anonymized_text\": anonymizer.anonymize} | prompt | llm\n",
|
||||
"chain.invoke(\"You can find our super secret data at https://supersecretdata.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Customization\n",
|
||||
"We can specify ``analyzed_fields`` to only anonymize particular types of data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'My name is Gabrielle Edwards, call me at 313-666-7440 or email me at real.slim.shady@gmail.com'"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"anonymizer = PresidioAnonymizer(analyzed_fields=[\"PERSON\"])\n",
|
||||
"\n",
|
||||
"anonymizer.anonymize(\n",
|
||||
" \"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"As can be observed, the name was correctly identified and replaced with another. The `analyzed_fields` attribute is responsible for what values are to be detected and substituted. We can add *PHONE_NUMBER* to the list:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'My name is Victoria Mckinney, call me at 713-549-8623 or email me at real.slim.shady@gmail.com'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"anonymizer = PresidioAnonymizer(analyzed_fields=[\"PERSON\", \"PHONE_NUMBER\"])\n",
|
||||
"anonymizer.anonymize(\n",
|
||||
" \"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"If no analyzed_fields are specified, by default the anonymizer will detect all supported formats. Below is the full list of them:\n",
|
||||
"\n",
|
||||
"`['PERSON', 'EMAIL_ADDRESS', 'PHONE_NUMBER', 'IBAN_CODE', 'CREDIT_CARD', 'CRYPTO', 'IP_ADDRESS', 'LOCATION', 'DATE_TIME', 'NRP', 'MEDICAL_LICENSE', 'URL', 'US_BANK_NUMBER', 'US_DRIVER_LICENSE', 'US_ITIN', 'US_PASSPORT', 'US_SSN']`\n",
|
||||
"\n",
|
||||
"**Disclaimer:** We suggest carefully defining the private data to be detected - Presidio doesn't work perfectly and it sometimes makes mistakes, so it's better to have more control over the data."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'My name is Billy Russo, call me at 970-996-9453x038 or email me at jamie80@example.org'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"anonymizer = PresidioAnonymizer()\n",
|
||||
"anonymizer.anonymize(\n",
|
||||
" \"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com\"\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"It may be that the above list of detected fields is not sufficient. For example, the already available *PHONE_NUMBER* field does not support polish phone numbers and confuses it with another field:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'My polish phone number is EVIA70648911396944'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"anonymizer = PresidioAnonymizer()\n",
|
||||
"anonymizer.anonymize(\"My polish phone number is 666555444\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"You can then write your own recognizers and add them to the pool of those present. How exactly to create recognizers is described in the [Presidio documentation](https://microsoft.github.io/presidio/samples/python/customizing_presidio_analyzer/)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define the regex pattern in a Presidio `Pattern` object:\n",
|
||||
"from presidio_analyzer import Pattern, PatternRecognizer\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"polish_phone_numbers_pattern = Pattern(\n",
|
||||
" name=\"polish_phone_numbers_pattern\",\n",
|
||||
" regex=\"(?<!\\w)(\\(?(\\+|00)?48\\)?)?[ -]?\\d{3}[ -]?\\d{3}[ -]?\\d{3}(?!\\w)\",\n",
|
||||
" score=1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Define the recognizer with one or more patterns\n",
|
||||
"polish_phone_numbers_recognizer = PatternRecognizer(\n",
|
||||
" supported_entity=\"POLISH_PHONE_NUMBER\", patterns=[polish_phone_numbers_pattern]\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"Now, we can add recognizer by calling `add_recognizer` method on the anonymizer:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"anonymizer.add_recognizer(polish_phone_numbers_recognizer)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"And voilà! With the added pattern-based recognizer, the anonymizer now handles polish phone numbers."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"My polish phone number is <POLISH_PHONE_NUMBER>\n",
|
||||
"My polish phone number is <POLISH_PHONE_NUMBER>\n",
|
||||
"My polish phone number is <POLISH_PHONE_NUMBER>\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(anonymizer.anonymize(\"My polish phone number is 666555444\"))\n",
|
||||
"print(anonymizer.anonymize(\"My polish phone number is 666 555 444\"))\n",
|
||||
"print(anonymizer.anonymize(\"My polish phone number is +48 666 555 444\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"The problem is - even though we recognize polish phone numbers now, we don't have a method (operator) that would tell how to substitute a given field - because of this, in the outpit we only provide string `<POLISH_PHONE_NUMBER>` We need to create a method to replace it correctly: "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'+48 533 220 543'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from faker import Faker\n",
|
||||
"\n",
|
||||
"fake = Faker(locale=\"pl_PL\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def fake_polish_phone_number(_=None):\n",
|
||||
" return fake.phone_number()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"fake_polish_phone_number()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\\\n",
|
||||
"We used Faker to create pseudo data. Now we can create an operator and add it to the anonymizer. For complete information about operators and their creation, see the Presidio documentation for [simple](https://microsoft.github.io/presidio/tutorial/10_simple_anonymization/) and [custom](https://microsoft.github.io/presidio/tutorial/11_custom_anonymization/) anonymization."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from presidio_anonymizer.entities import OperatorConfig\n",
|
||||
"\n",
|
||||
"new_operators = {\n",
|
||||
" \"POLISH_PHONE_NUMBER\": OperatorConfig(\n",
|
||||
" \"custom\", {\"lambda\": fake_polish_phone_number}\n",
|
||||
" )\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"anonymizer.add_operators(new_operators)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'My polish phone number is +48 692 715 636'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"anonymizer.anonymize(\"My polish phone number is 666555444\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Future works\n",
|
||||
"\n",
|
||||
"- **deanonymization** - add the ability to reverse anonymization. For example, the workflow could look like this: `anonymize -> LLMChain -> deanonymize`. By doing this, we will retain anonymity in requests to, for example, OpenAI, and then be able restore the original data.\n",
|
||||
"- **instance anonymization** - at this point, each occurrence of PII is treated as a separate entity and separately anonymized. Therefore, two occurrences of the name John Doe in the text will be changed to two different names. It is therefore worth introducing support for full instance detection, so that repeated occurrences are treated as a single object."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
# Pydantic Compatibility
|
||||
# Pydantic compatibility
|
||||
|
||||
- Pydantic v2 was released in June 2023 (https://docs.pydantic.dev/2.0/blog/pydantic-v2-final/)
|
||||
- v2 contains a number of breaking changes (https://docs.pydantic.dev/2.0/migration/)
|
||||
- Pydantic v2 and v1 are under the same package name, so both versions cannot be installed at the same time
|
||||
|
||||
## LangChain Pydantic Migration Plan
|
||||
## LangChain Pydantic migration plan
|
||||
|
||||
As of `langchain>=0.0.267`, LangChain will allow users to install either Pydantic V1 or V2.
|
||||
* Internally LangChain will continue to [use V1](https://docs.pydantic.dev/latest/migration/#continue-using-pydantic-v1-features).
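For example, if Pydantic 2 is installed but you need to extend a LangChain (V1-based) object, a sketch like the following uses the V1 namespace that ships inside Pydantic 2 (the tool itself is just a placeholder):

```python
from typing import Type

# With Pydantic 2 installed, its V1 API remains importable from `pydantic.v1`.
from pydantic.v1 import BaseModel, Field  # not `from pydantic import ...`

from langchain.tools.base import BaseTool


class CalculatorInput(BaseModel):
    # The schema handed to a LangChain tool must use the same Pydantic
    # generation (V1) that LangChain uses internally.
    question: str = Field(description="A math question")


class CalculatorTool(BaseTool):
    name = "calculator"
    description = "Answers simple math questions"
    args_schema: Type[BaseModel] = CalculatorInput

    def _run(self, question: str) -> str:
        return "42"  # placeholder logic for illustration
```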
|
||||
|
||||
1390
docs/extras/guides/safety/amazon_comprehend_chain.ipynb
Normal file
File diff suppressed because it is too large
@@ -93,7 +93,7 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Usage\n",
|
||||
"### Using the Context callback within a Chat Model\n",
|
||||
"### Using the Context callback within a chat model\n",
|
||||
"\n",
|
||||
"The Context callback handler can be used to directly record transcripts between users and AI assistants.\n",
|
||||
"\n",
|
||||
|
||||
File diff suppressed because one or more lines are too long
63
docs/extras/integrations/callbacks/llmonitor.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# LLMonitor
|
||||
|
||||
[LLMonitor](https://llmonitor.com) is an open-source observability platform that provides cost tracking, user tracking and powerful agent tracing.
|
||||
|
||||
<video controls width='100%' >
|
||||
<source src='https://llmonitor.com/videos/demo-annotated.mp4'/>
|
||||
</video>
|
||||
|
||||
## Setup
|
||||
Create an account on [llmonitor.com](https://llmonitor.com), create an `App`, and then copy the associated `tracking id`.
|
||||
Once you have it, set it as an environment variable by running:
|
||||
```bash
|
||||
export LLMONITOR_APP_ID="..."
|
||||
```
|
||||
|
||||
If you'd prefer not to set an environment variable, you can pass the key directly when initializing the callback handler:
|
||||
```python
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
|
||||
handler = LLMonitorCallbackHandler(app_id="...")
|
||||
```
|
||||
|
||||
## Usage with LLM/Chat models
|
||||
```python
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
|
||||
handler = LLMonitorCallbackHandler(app_id="...")
|
||||
|
||||
llm = OpenAI(
|
||||
callbacks=[handler],
|
||||
)
|
||||
|
||||
chat = ChatOpenAI(
|
||||
callbacks=[handler],
|
||||
metadata={"userId": "123"}, # you can assign user ids to models in the metadata
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
## Usage with agents
|
||||
```python
|
||||
from langchain.agents import load_tools, initialize_agent, AgentType
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.callbacks import LLMonitorCallbackHandler
|
||||
|
||||
handler = LLMonitorCallbackHandler(app_id="...")
|
||||
|
||||
llm = OpenAI(temperature=0)
|
||||
tools = load_tools(["serpapi", "llm-math"], llm=llm)
|
||||
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
|
||||
agent.run(
|
||||
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?",
|
||||
callbacks=[handler],
|
||||
metadata={
|
||||
"agentName": "Leo DiCaprio's girlfriend", # you can assign a custom agent in the metadata
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
## Support
|
||||
For any question or issue with integration you can reach out to the LLMonitor team on [Discord](http://discord.com/invite/8PafSG58kK) or via [email](mailto:vince@llmonitor.com).
|
||||
@@ -11,7 +11,7 @@
|
||||
"\n",
|
||||
"[PromptLayer](https://promptlayer.com) is a an LLM observability platform that lets you visualize requests, version prompts, and track usage. In this guide we will go over how to setup the `PromptLayerCallbackHandler`. \n",
|
||||
"\n",
|
||||
"While PromptLayer does have LLMs that integrate directly with LangChain (eg [`PromptLayerOpenAI`](https://python.langchain.com/docs/integrations/llms/promptlayer_openai)), this callback is the recommended way to integrate PromptLayer with LangChain.\n",
|
||||
"While PromptLayer does have LLMs that integrate directly with LangChain (e.g. [`PromptLayerOpenAI`](https://python.langchain.com/docs/integrations/llms/promptlayer_openai)), this callback is the recommended way to integrate PromptLayer with LangChain.\n",
|
||||
"\n",
|
||||
"See [our docs](https://docs.promptlayer.com/languages/langchain) for more information."
|
||||
]
|
||||
|
||||
106
docs/extras/integrations/chat/bedrock.ipynb
Normal file
@@ -0,0 +1,106 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bf733a38-db84-4363-89e2-de6735c37230",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Bedrock Chat\n",
|
||||
"\n",
|
||||
"[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that makes FMs from leading AI startups and Amazon available via an API, so you can choose from a wide range of FMs to find the model that is best suited for your use case"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d51edc81",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pip install boto3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import BedrockChat\n",
|
||||
"from langchain.schema import HumanMessage"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chat = BedrockChat(model_id=\"anthropic.claude-v2\", model_kwargs={\"temperature\":0.1})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\" Voici la traduction en français : J'adore programmer.\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" HumanMessage(\n",
|
||||
" content=\"Translate this sentence from English to French. I love programming.\"\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"chat(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c253883f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
382
docs/extras/integrations/chat/ollama.ipynb
Normal file
@@ -0,0 +1,382 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Ollama\n",
|
||||
"\n",
|
||||
"[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as LLaMA2, locally.\n",
|
||||
"\n",
|
||||
"Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. \n",
|
||||
"\n",
|
||||
"It optimizes setup and configuration details, including GPU usage.\n",
|
||||
"\n",
|
||||
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
|
||||
"\n",
|
||||
"* [Download](https://ollama.ai/download)\n",
|
||||
"* Fetch a model via `ollama pull <model family>`\n",
|
||||
"* e.g., for `Llama-7b`: `ollama pull llama2`\n",
|
||||
"* This will download the most basic version of the model (e.g., minimum # parameters and 4-bit quantization)\n",
|
||||
"* On Mac, it will download to:\n",
|
||||
"\n",
|
||||
"`~/.ollama/models/manifests/registry.ollama.ai/library/<model family>/latest`\n",
|
||||
"\n",
|
||||
"* And we can specify a particular version, e.g., for `ollama pull vicuna:13b-v1.5-16k-q4_0`\n",
|
||||
"* The file is here with the model version in place of `latest`\n",
|
||||
"\n",
|
||||
"`~/.ollama/models/manifests/registry.ollama.ai/library/vicuna/13b-v1.5-16k-q4_0`\n",
|
||||
"\n",
|
||||
"You can easily access models in a few ways:\n",
|
||||
"\n",
|
||||
"1/ if the app is running:\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Select your model when setting `llm = Ollama(..., model=\"<model family>:<version>\")`\n",
|
||||
"* If you set `llm = Ollama(..., model=\"<model family\")` withoout a version it will simply look for `latest`\n",
|
||||
"\n",
|
||||
"2/ if building from source or just running the binary: \n",
|
||||
"* Then you must run `ollama serve`\n",
|
||||
"* All of your local models are automatically served on `localhost:11434`\n",
|
||||
"* Then, select as shown above\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## Usage\n",
|
||||
"\n",
|
||||
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
|
||||
"\n",
|
||||
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama2:7b-chat`) then you can use the `ChatOllama` interface.\n",
|
||||
"\n",
|
||||
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input."
|
||||
]
|
||||
},
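For completion-style (non-chat) use, the plain `Ollama` LLM wrapper follows the same pattern; a minimal sketch, assuming the local server is up on `localhost:11434` and `llama2` has already been pulled:

```python
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Assumes the Ollama app (or `ollama serve`) is listening on localhost:11434
# and that `ollama pull llama2` has been run.
llm = Ollama(
    base_url="http://localhost:11434",
    model="llama2",
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)

llm("Why is the sky blue?")
```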
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOllama\n",
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler \n",
|
||||
"chat_model = ChatOllama(model=\"llama2:7b-chat\", \n",
|
||||
" callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With `StreamingStdOutCallbackHandler`, you will see tokens streamed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Artificial intelligence (AI) has a rich and varied history that spans several decades. Hinweis: The following is a brief overview of the major milestones in the history of AI, but it is by no means exhaustive.\n",
|
||||
"\n",
|
||||
"1. Early Beginnings (1950s-1960s): The term \"Artificial Intelligence\" was coined in 1956 by computer scientist John McCarthy. However, the concept of creating machines that can think and learn like humans dates back to ancient times. In the 1950s and 1960s, researchers began exploring the possibilities of AI using simple algorithms and machine learning techniques.\n",
|
||||
"2. Rule-Based Systems (1970s-1980s): In the 1970s and 1980s, AI research focused on developing rule-based systems, which use predefined rules to reason and make decisions. This led to the development of expert systems, which were designed to mimic the decision-making abilities of human experts in specific domains.\n",
|
||||
"3. Machine Learning (1980s-1990s): The 1980s saw a shift towards machine learning, which enables machines to learn from data without being explicitly programmed. This led to the development of algorithms such as decision trees, neural networks, and support vector machines.\n",
|
||||
"4. Deep Learning (2000s-present): In the early 2000s, deep learning emerged as a subfield of machine learning, focusing on neural networks with multiple layers. These networks can learn complex representations of data, leading to breakthroughs in image and speech recognition, natural language processing, and other areas.\n",
|
||||
"5. Natural Language Processing (NLP) (1980s-present): NLP has been an active area of research since the 1980s, with a focus on developing algorithms that can understand and generate human language. This has led to applications such as chatbots, voice assistants, and language translation systems.\n",
|
||||
"6. Robotics (1970s-present): The development of robotics has been closely tied to AI research, with a focus on creating machines that can perform tasks that typically require human intelligence, such as manipulation and locomotion.\n",
|
||||
"7. Computer Vision (1980s-present): Computer vision has been an active area of research since the 1980s, with a focus on enabling machines to interpret and understand visual data from the world around us. This has led to applications such as image recognition, object detection, and autonomous driving.\n",
|
||||
"8. Ethics and Society (1990s-present): As AI technology has become more advanced and integrated into various aspects of society, there has been a growing concern about the ethical implications of AI. This includes issues related to privacy, bias, and job displacement.\n",
|
||||
"9. Reinforcement Learning (2000s-present): Reinforcement learning is a subfield of machine learning that involves training machines to make decisions based on feedback from their environment. This has led to breakthroughs in areas such as game playing, robotics, and autonomous driving.\n",
|
||||
"10. Generative Models (2010s-present): Generative models are a class of AI algorithms that can generate new data that is similar to a given dataset. This has led to applications such as image synthesis, music generation, and language creation.\n",
|
||||
"\n",
|
||||
"These are just a few of the many developments in the history of AI. As the field continues to evolve, we can expect even more exciting breakthroughs and innovations in the years to come."
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=' Artificial intelligence (AI) has a rich and varied history that spans several decades. Hinweis: The following is a brief overview of the major milestones in the history of AI, but it is by no means exhaustive.\\n\\n1. Early Beginnings (1950s-1960s): The term \"Artificial Intelligence\" was coined in 1956 by computer scientist John McCarthy. However, the concept of creating machines that can think and learn like humans dates back to ancient times. In the 1950s and 1960s, researchers began exploring the possibilities of AI using simple algorithms and machine learning techniques.\\n2. Rule-Based Systems (1970s-1980s): In the 1970s and 1980s, AI research focused on developing rule-based systems, which use predefined rules to reason and make decisions. This led to the development of expert systems, which were designed to mimic the decision-making abilities of human experts in specific domains.\\n3. Machine Learning (1980s-1990s): The 1980s saw a shift towards machine learning, which enables machines to learn from data without being explicitly programmed. This led to the development of algorithms such as decision trees, neural networks, and support vector machines.\\n4. Deep Learning (2000s-present): In the early 2000s, deep learning emerged as a subfield of machine learning, focusing on neural networks with multiple layers. These networks can learn complex representations of data, leading to breakthroughs in image and speech recognition, natural language processing, and other areas.\\n5. Natural Language Processing (NLP) (1980s-present): NLP has been an active area of research since the 1980s, with a focus on developing algorithms that can understand and generate human language. This has led to applications such as chatbots, voice assistants, and language translation systems.\\n6. Robotics (1970s-present): The development of robotics has been closely tied to AI research, with a focus on creating machines that can perform tasks that typically require human intelligence, such as manipulation and locomotion.\\n7. Computer Vision (1980s-present): Computer vision has been an active area of research since the 1980s, with a focus on enabling machines to interpret and understand visual data from the world around us. This has led to applications such as image recognition, object detection, and autonomous driving.\\n8. Ethics and Society (1990s-present): As AI technology has become more advanced and integrated into various aspects of society, there has been a growing concern about the ethical implications of AI. This includes issues related to privacy, bias, and job displacement.\\n9. Reinforcement Learning (2000s-present): Reinforcement learning is a subfield of machine learning that involves training machines to make decisions based on feedback from their environment. This has led to breakthroughs in areas such as game playing, robotics, and autonomous driving.\\n10. Generative Models (2010s-present): Generative models are a class of AI algorithms that can generate new data that is similar to a given dataset. This has led to applications such as image synthesis, music generation, and language creation.\\n\\nThese are just a few of the many developments in the history of AI. As the field continues to evolve, we can expect even more exciting breakthroughs and innovations in the years to come.', additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import HumanMessage\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" HumanMessage(content=\"Tell me about the history of AI\")\n",
|
||||
"]\n",
|
||||
"chat_model(messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## RAG\n",
|
||||
"\n",
|
||||
"We can use Olama with RAG, [just as shown here](https://python.langchain.com/docs/use_cases/question_answering/how_to/local_retrieval_qa).\n",
|
||||
"\n",
|
||||
"Let's use the 13b model:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"ollama pull llama2:13b\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Or, the 13b-chat model:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"ollama pull llama2:13b-chat\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Let's also use local embeddings from `GPT4AllEmbeddings` and `Chroma`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! pip install gpt4all chromadb"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import WebBaseLoader\n",
|
||||
"loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
|
||||
"data = loader.load()\n",
|
||||
"\n",
|
||||
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
|
||||
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
|
||||
"all_splits = text_splitter.split_documents(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found model file at /Users/rlm/.cache/gpt4all/ggml-all-MiniLM-L6-v2-f16.bin\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.embeddings import GPT4AllEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"4"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question = \"What are the approaches to Task Decomposition?\"\n",
|
||||
"docs = vectorstore.similarity_search(question)\n",
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain import PromptTemplate\n",
|
||||
"\n",
|
||||
"# Prompt\n",
|
||||
"template = \"\"\"[INST] <<SYS>> Use the following pieces of context to answer the question at the end. \n",
|
||||
"If you don't know the answer, just say that you don't know, don't try to make up an answer. \n",
|
||||
"Use three sentences maximum and keep the answer as concise as possible. <</SYS>>\n",
|
||||
"{context}\n",
|
||||
"Question: {question}\n",
|
||||
"Helpful Answer:[/INST]\"\"\"\n",
|
||||
"QA_CHAIN_PROMPT = PromptTemplate(\n",
|
||||
" input_variables=[\"context\", \"question\"],\n",
|
||||
" template=template,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Chat model\n",
|
||||
"from langchain.chat_models import ChatOllama\n",
|
||||
"from langchain.callbacks.manager import CallbackManager\n",
|
||||
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
|
||||
"chat_model = ChatOllama(model=\"llama2:13b-chat\",\n",
|
||||
" verbose=True,\n",
|
||||
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# QA chain\n",
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"qa_chain = RetrievalQA.from_chain_type(\n",
|
||||
" chat_model,\n",
|
||||
" retriever=vectorstore.as_retriever(),\n",
|
||||
" chain_type_kwargs={\"prompt\": QA_CHAIN_PROMPT},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Based on the provided context, there are three approaches to task decomposition for AI agents:\n",
|
||||
"\n",
|
||||
"1. LLM with simple prompting, such as \"Steps for XYZ.\" or \"What are the subgoals for achieving XYZ?\"\n",
|
||||
"2. Task-specific instructions, such as \"Write a story outline\" for writing a novel.\n",
|
||||
"3. Human inputs."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question = \"What are the various approaches to Task Decomposition for AI Agents?\"\n",
|
||||
"result = qa_chain({\"query\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also get logging for tokens."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" Based on the given context, here is the answer to the question \"What are the approaches to Task Decomposition?\"\n",
|
||||
"\n",
|
||||
"There are three approaches to task decomposition:\n",
|
||||
"\n",
|
||||
"1. LLM with simple prompting, such as \"Steps for XYZ.\" or \"What are the subgoals for achieving XYZ?\"\n",
|
||||
"2. Using task-specific instructions, like \"Write a story outline\" for writing a novel.\n",
|
||||
"3. With human inputs.{'model': 'llama2:13b-chat', 'created_at': '2023-08-23T15:37:51.469127Z', 'done': True, 'context': [1, 29871, 1, 29961, 25580, 29962, 518, 25580, 29962, 518, 25580, 29962, 3532, 14816, 29903, 6778, 4803, 278, 1494, 12785, 310, 3030, 304, 1234, 278, 1139, 472, 278, 1095, 29889, 29871, 13, 3644, 366, 1016, 29915, 29873, 1073, 278, 1234, 29892, 925, 1827, 393, 366, 1016, 29915, 29873, 1073, 29892, 1016, 29915, 29873, 1018, 304, 1207, 701, 385, 1234, 29889, 29871, 13, 11403, 2211, 25260, 7472, 322, 3013, 278, 1234, 408, 3022, 895, 408, 1950, 29889, 529, 829, 14816, 29903, 6778, 13, 5398, 26227, 508, 367, 2309, 313, 29896, 29897, 491, 365, 26369, 411, 2560, 9508, 292, 763, 376, 7789, 567, 363, 1060, 29979, 29999, 7790, 29876, 29896, 19602, 376, 5618, 526, 278, 1014, 1484, 1338, 363, 3657, 15387, 1060, 29979, 29999, 29973, 613, 313, 29906, 29897, 491, 773, 3414, 29899, 14940, 11994, 29936, 321, 29889, 29887, 29889, 376, 6113, 263, 5828, 27887, 1213, 363, 5007, 263, 9554, 29892, 470, 313, 29941, 29897, 411, 5199, 10970, 29889, 13, 13, 5398, 26227, 508, 367, 2309, 313, 29896, 29897, 491, 365, 26369, 411, 2560, 9508, 292, 763, 376, 7789, 567, 363, 1060, 29979, 29999, 7790, 29876, 29896, 19602, 376, 5618, 526, 278, 1014, 1484, 1338, 363, 3657, 15387, 1060, 29979, 29999, 29973, 613, 313, 29906, 29897, 491, 773, 3414, 29899, 14940, 11994, 29936, 321, 29889, 29887, 29889, 376, 6113, 263, 5828, 27887, 1213, 363, 5007, 263, 9554, 29892, 470, 313, 29941, 29897, 411, 5199, 10970, 29889, 13, 13, 1451, 16047, 267, 297, 1472, 29899, 8489, 18987, 322, 3414, 26227, 29901, 1858, 9450, 975, 263, 3309, 29891, 4955, 322, 17583, 3902, 8253, 278, 1650, 2913, 3933, 18066, 292, 29889, 365, 26369, 29879, 21117, 304, 10365, 13900, 746, 20050, 411, 15668, 4436, 29892, 3907, 963, 3109, 16424, 9401, 304, 25618, 1058, 5110, 515, 14260, 322, 1059, 29889, 13, 13, 1451, 16047, 267, 297, 1472, 29899, 8489, 18987, 322, 3414, 26227, 29901, 1858, 9450, 975, 263, 3309, 29891, 4955, 322, 17583, 3902, 8253, 278, 1650, 2913, 3933, 18066, 292, 29889, 365, 26369, 29879, 21117, 304, 10365, 13900, 746, 20050, 411, 15668, 4436, 29892, 3907, 963, 3109, 16424, 9401, 304, 25618, 1058, 5110, 515, 14260, 322, 1059, 29889, 13, 16492, 29901, 1724, 526, 278, 13501, 304, 9330, 897, 510, 3283, 29973, 13, 29648, 1319, 673, 10834, 29914, 25580, 29962, 518, 29914, 25580, 29962, 518, 29914, 25580, 29962, 29871, 16564, 373, 278, 2183, 3030, 29892, 1244, 338, 278, 1234, 304, 278, 1139, 376, 5618, 526, 278, 13501, 304, 9330, 897, 510, 3283, 3026, 13, 13, 8439, 526, 2211, 13501, 304, 3414, 26227, 29901, 13, 13, 29896, 29889, 365, 26369, 411, 2560, 9508, 292, 29892, 1316, 408, 376, 7789, 567, 363, 1060, 29979, 29999, 1213, 470, 376, 5618, 526, 278, 1014, 1484, 1338, 363, 3657, 15387, 1060, 29979, 29999, 3026, 13, 29906, 29889, 5293, 3414, 29899, 14940, 11994, 29892, 763, 376, 6113, 263, 5828, 27887, 29908, 363, 5007, 263, 9554, 29889, 13, 29941, 29889, 2973, 5199, 10970, 29889, 2], 'total_duration': 9514823750, 'load_duration': 795542, 'sample_count': 99, 'sample_duration': 68732000, 'prompt_eval_count': 146, 'prompt_eval_duration': 6206275000, 'eval_count': 98, 'eval_duration': 3229641000}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.schema import LLMResult\n",
|
||||
"from langchain.callbacks.base import BaseCallbackHandler\n",
|
||||
"\n",
|
||||
"class GenerationStatisticsCallback(BaseCallbackHandler):\n",
|
||||
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
|
||||
" print(response.generations[0][0].generation_info)\n",
|
||||
" \n",
|
||||
"callback_manager = CallbackManager([StreamingStdOutCallbackHandler(), GenerationStatisticsCallback()])\n",
|
||||
"\n",
|
||||
"chat_model = ChatOllama(model=\"llama2:13b-chat\",\n",
|
||||
" verbose=True,\n",
|
||||
" callback_manager=callback_manager)\n",
|
||||
"\n",
|
||||
"qa_chain = RetrievalQA.from_chain_type(\n",
|
||||
" chat_model,\n",
|
||||
" retriever=vectorstore.as_retriever(),\n",
|
||||
" chain_type_kwargs={\"prompt\": QA_CHAIN_PROMPT},\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"question = \"What are the approaches to Task Decomposition?\"\n",
|
||||
"result = qa_chain({\"query\": question})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`eval_count` / (`eval_duration`/10e9) gets `tok / s`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"30.343929867127645"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"98 / (3229641000/1000/1000/1000)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -143,12 +143,39 @@
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c095285d",
|
||||
"cell_type": "markdown",
|
||||
"id": "57e27714",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
"source": [
|
||||
"## Fine-tuning\n",
|
||||
"\n",
|
||||
"You can call fine-tuned OpenAI models by passing in your corresponding `modelName` parameter.\n",
|
||||
"\n",
|
||||
"This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "33c4a8b0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, example=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"fine_tuned_model = ChatOpenAI(temperature=0, model_name=\"ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR\")\n",
|
||||
"\n",
|
||||
"fine_tuned_model(messages)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
@@ -167,7 +194,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.7"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
325
docs/extras/integrations/chat_loaders/discord.ipynb
Normal file
@@ -0,0 +1,325 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c4ff9336-1cf3-459e-bd70-d1314c1da6a0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Discord\n",
|
||||
"\n",
|
||||
"This notebook shows how to create your own chat loader that works on copy-pasted messages (from dms) to a list of LangChain messages.\n",
|
||||
"\n",
|
||||
"The process has four steps:\n",
|
||||
"1. Create the chat .txt file by copying chats from the Discord app and pasting them in a file on your local computer\n",
|
||||
"2. Copy the chat loader definition from below to a local file.\n",
|
||||
"3. Initialize the `DiscordChatLoader` with the file path pointed to the text file.\n",
|
||||
"4. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n",
|
||||
"\n",
|
||||
"## 1. Creat message dump\n",
|
||||
"\n",
|
||||
"Currently (2023/08/23) this loader only supports .txt files in the format generated by copying messages in the app to your clipboard and pasting in a file. Below is an example."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e4ccfdfa-6869-4d67-90a0-ab99f01b7553",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Overwriting discord_chats.txt\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%writefile discord_chats.txt\n",
|
||||
"talkingtower — 08/15/2023 11:10 AM\n",
|
||||
"Love music! Do you like jazz?\n",
|
||||
"reporterbob — 08/15/2023 9:27 PM\n",
|
||||
"Yes! Jazz is fantastic. Ever heard this one?\n",
|
||||
"Website\n",
|
||||
"Listen to classic jazz track...\n",
|
||||
"\n",
|
||||
"talkingtower — Yesterday at 5:03 AM\n",
|
||||
"Indeed! Great choice. 🎷\n",
|
||||
"reporterbob — Yesterday at 5:23 AM\n",
|
||||
"Thanks! How about some virtual sightseeing?\n",
|
||||
"Website\n",
|
||||
"Virtual tour of famous landmarks...\n",
|
||||
"\n",
|
||||
"talkingtower — Today at 2:38 PM\n",
|
||||
"Sounds fun! Let's explore.\n",
|
||||
"reporterbob — Today at 2:56 PM\n",
|
||||
"Enjoy the tour! See you around.\n",
|
||||
"talkingtower — Today at 3:00 PM\n",
|
||||
"Thank you! Goodbye! 👋\n",
|
||||
"reporterbob — Today at 3:02 PM\n",
|
||||
"Farewell! Happy exploring."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "359565a7-dad3-403c-a73c-6414b1295127",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Define chat loader\n",
|
||||
"\n",
|
||||
"LangChain currently does not support "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a429e0c4-4d7d-45f8-bbbb-c7fc5229f6af",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import logging\n",
|
||||
"import re\n",
|
||||
"from typing import Iterator, List\n",
|
||||
"\n",
|
||||
"from langchain import schema\n",
|
||||
"from langchain.chat_loaders import base as chat_loaders\n",
|
||||
"\n",
|
||||
"logger = logging.getLogger()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class DiscordChatLoader(chat_loaders.BaseChatLoader):\n",
|
||||
" \n",
|
||||
" def __init__(self, path: str):\n",
|
||||
" \"\"\"\n",
|
||||
" Initialize the Discord chat loader.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" path: Path to the exported Discord chat text file.\n",
|
||||
" \"\"\"\n",
|
||||
" self.path = path\n",
|
||||
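" # Matches Discord message header lines: a sender name followed by a timestamp such as 'Today at 2:38 PM'\n",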
" self._message_line_regex = re.compile(\n",
|
||||
" r\"(.+?) — (\\w{3,9} \\d{1,2}(?:st|nd|rd|th)?(?:, \\d{4})? \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\", # noqa\n",
|
||||
" flags=re.DOTALL,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" def _load_single_chat_session_from_txt(\n",
|
||||
" self, file_path: str\n",
|
||||
" ) -> chat_loaders.ChatSession:\n",
|
||||
" \"\"\"\n",
|
||||
" Load a single chat session from a text file.\n",
|
||||
"\n",
|
||||
" Args:\n",
|
||||
" file_path: Path to the text file containing the chat messages.\n",
|
||||
"\n",
|
||||
" Returns:\n",
|
||||
" A `ChatSession` object containing the loaded chat messages.\n",
|
||||
" \"\"\"\n",
|
||||
" with open(file_path, \"r\", encoding=\"utf-8\") as file:\n",
|
||||
" lines = file.readlines()\n",
|
||||
"\n",
|
||||
" results: List[schema.BaseMessage] = []\n",
|
||||
" current_sender = None\n",
|
||||
" current_timestamp = None\n",
|
||||
" current_content = []\n",
|
||||
" for line in lines:\n",
|
||||
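" # A new message starts when the line matches the sender/timestamp header pattern\n",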
" if re.match(\n",
|
||||
" r\".+? — (\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\", # noqa\n",
|
||||
" line,\n",
|
||||
" ):\n",
|
||||
" if current_sender and current_content:\n",
|
||||
" results.append(\n",
|
||||
" schema.HumanMessage(\n",
|
||||
" content=\"\".join(current_content).strip(),\n",
|
||||
" additional_kwargs={\n",
|
||||
" \"sender\": current_sender,\n",
|
||||
" \"events\": [{\"message_time\": current_timestamp}],\n",
|
||||
" },\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" current_sender, current_timestamp = line.split(\" — \")[:2]\n",
|
||||
" current_content = [\n",
|
||||
" line[len(current_sender) + len(current_timestamp) + 4 :].strip()\n",
|
||||
" ]\n",
|
||||
" elif re.match(r\"\\[\\d{1,2}:\\d{2} (?:AM|PM)\\]\", line.strip()):\n",
|
||||
" results.append(\n",
|
||||
" schema.HumanMessage(\n",
|
||||
" content=\"\".join(current_content).strip(),\n",
|
||||
" additional_kwargs={\n",
|
||||
" \"sender\": current_sender,\n",
|
||||
" \"events\": [{\"message_time\": current_timestamp}],\n",
|
||||
" },\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" current_timestamp = line.strip()[1:-1]\n",
|
||||
" current_content = []\n",
|
||||
" else:\n",
|
||||
" current_content.append(\"\\n\" + line.strip())\n",
|
||||
"\n",
|
||||
" if current_sender and current_content:\n",
|
||||
" results.append(\n",
|
||||
" schema.HumanMessage(\n",
|
||||
" content=\"\".join(current_content).strip(),\n",
|
||||
" additional_kwargs={\n",
|
||||
" \"sender\": current_sender,\n",
|
||||
" \"events\": [{\"message_time\": current_timestamp}],\n",
|
||||
" },\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" return chat_loaders.ChatSession(messages=results)\n",
|
||||
"\n",
|
||||
" def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:\n",
|
||||
" \"\"\"\n",
|
||||
" Lazy load the messages from the chat file and yield them in the required format.\n",
|
||||
"\n",
|
||||
" Yields:\n",
|
||||
" A `ChatSession` object containing the loaded chat messages.\n",
|
||||
" \"\"\"\n",
|
||||
" yield self._load_single_chat_session_from_txt(self.path)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8240393-48be-44d2-b0d6-52c215cd8ac2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Create loader\n",
|
||||
"\n",
|
||||
"We will point to the file we just wrote to disk."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "1268de40-b0e5-445d-9cd8-54856cd0293a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = DiscordChatLoader(\n",
|
||||
" path=\"./discord_chats.txt\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4928df4b-ae31-48a7-bd76-be3ecee1f3e0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Load Messages\n",
|
||||
"\n",
|
||||
"Assuming the format is correct, the loader will convert the chats to langchain messages."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "c8a0836d-4a22-4790-bfe9-97f2145bb0d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain.chat_loaders.base import ChatSession\n",
|
||||
"from langchain.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
"merged_messages = merge_chat_runs(raw_messages)\n",
|
||||
"# Convert messages from \"talkingtower\" to AI messages\n",
|
||||
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"talkingtower\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "1913963b-c44e-4f7a-aba7-0423c9b8bd59",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'messages': [AIMessage(content='Love music! Do you like jazz?', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': '08/15/2023 11:10 AM\\n'}]}, example=False),\n",
|
||||
" HumanMessage(content='Yes! Jazz is fantastic. Ever heard this one?\\nWebsite\\nListen to classic jazz track...', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': '08/15/2023 9:27 PM\\n'}]}, example=False),\n",
|
||||
" AIMessage(content='Indeed! Great choice. 🎷', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Yesterday at 5:03 AM\\n'}]}, example=False),\n",
|
||||
" HumanMessage(content='Thanks! How about some virtual sightseeing?\\nWebsite\\nVirtual tour of famous landmarks...', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Yesterday at 5:23 AM\\n'}]}, example=False),\n",
|
||||
" AIMessage(content=\"Sounds fun! Let's explore.\", additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Today at 2:38 PM\\n'}]}, example=False),\n",
|
||||
" HumanMessage(content='Enjoy the tour! See you around.', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Today at 2:56 PM\\n'}]}, example=False),\n",
|
||||
" AIMessage(content='Thank you! Goodbye! 👋', additional_kwargs={'sender': 'talkingtower', 'events': [{'message_time': 'Today at 3:00 PM\\n'}]}, example=False),\n",
|
||||
" HumanMessage(content='Farewell! Happy exploring.', additional_kwargs={'sender': 'reporterbob', 'events': [{'message_time': 'Today at 3:02 PM\\n'}]}, example=False)]}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"messages"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8595a518-5c89-44aa-94a7-ca51e7e2a5fa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Next Steps\n",
|
||||
"\n",
|
||||
"You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "08ff0a1e-fca0-4da3-aacd-d7401f99d946",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Thank you! Have a wonderful day! 🌟"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"for chunk in llm.stream(messages[0]['messages']):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "50a5251f-074a-4a3c-a2b0-b1de85e0ac6a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
File diff suppressed because it is too large
579
docs/extras/integrations/chat_loaders/facebook.ipynb
Normal file
@@ -0,0 +1,579 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e4bd269b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Facebook Messenger\n",
|
||||
"\n",
|
||||
"This notebook shows how to load data from Facebook in a format you can finetune on. The overall steps are:\n",
|
||||
"\n",
|
||||
"1. Download your messenger data to disk.\n",
|
||||
"2. Create the Chat Loader and call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n",
|
||||
"3. Optionally use `merge_chat_runs` to combine message from the same sender in sequence, and/or `map_ai_messages` to convert messages from the specified sender to the \"AIMessage\" class. Once you've done this, call `convert_messages_for_finetuning` to prepare your data for fine-tuning.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Once this has been done, you can fine-tune your model. To do so you would complete the following steps:\n",
|
||||
"\n",
|
||||
"4. Upload your messages to OpenAI and run a fine-tuning job.\n",
|
||||
"6. Use the resulting model in your LangChain app!\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Let's begin.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"## 1. Download Data\n",
|
||||
"\n",
|
||||
"To download your own messenger data, following instructions [here](https://www.zapptales.com/en/download-facebook-messenger-chat-history-how-to/). IMPORTANT - make sure to download them in JSON format (not HTML).\n",
|
||||
"\n",
|
||||
"We are hosting an example dump at [this google drive link](https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing) that we will use in this walkthrough."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "647f2158-a42e-4634-b283-b8492caf542a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"File file.zip downloaded.\n",
|
||||
"File file.zip has been unzipped.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This uses some example data\n",
|
||||
"import requests\n",
|
||||
"import zipfile\n",
|
||||
"\n",
|
||||
"def download_and_unzip(url: str, output_path: str = 'file.zip') -> None:\n",
|
||||
" file_id = url.split('/')[-2]\n",
|
||||
" download_url = f'https://drive.google.com/uc?export=download&id={file_id}'\n",
|
||||
"\n",
|
||||
" response = requests.get(download_url)\n",
|
||||
" if response.status_code != 200:\n",
|
||||
" print('Failed to download the file.')\n",
|
||||
" return\n",
|
||||
"\n",
|
||||
" with open(output_path, 'wb') as file:\n",
|
||||
" file.write(response.content)\n",
|
||||
" print(f'File {output_path} downloaded.')\n",
|
||||
"\n",
|
||||
" with zipfile.ZipFile(output_path, 'r') as zip_ref:\n",
|
||||
" zip_ref.extractall()\n",
|
||||
" print(f'File {output_path} has been unzipped.')\n",
|
||||
"\n",
|
||||
"# URL of the file to download\n",
|
||||
"url = 'https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing'\n",
|
||||
"\n",
|
||||
"# Download and unzip\n",
|
||||
"download_and_unzip(url)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "48ef8bb1-fc28-453c-835a-94a552f05a91",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Create Chat Loader\n",
|
||||
"\n",
|
||||
"We have 2 different `FacebookMessengerChatLoader` classes, one for an entire directory of chats, and one to load individual files. We"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a0869bc6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"directory_path = \"./hogwarts\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "0460bf25",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.facebook_messenger import (\n",
|
||||
" SingleFileFacebookMessengerChatLoader,\n",
|
||||
" FolderFacebookMessengerChatLoader,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "f61ee277",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = SingleFileFacebookMessengerChatLoader(\n",
|
||||
" path=\"./hogwarts/inbox/HermioneGranger/messages_Hermione_Granger.json\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "ec466ad7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[HumanMessage(content=\"Hi Hermione! How's your summer going so far?\", additional_kwargs={'sender': 'Harry Potter'}, example=False),\n",
|
||||
" HumanMessage(content=\"Harry! Lovely to hear from you. My summer is going well, though I do miss everyone. I'm spending most of my time going through my books and researching fascinating new topics. How about you?\", additional_kwargs={'sender': 'Hermione Granger'}, example=False),\n",
|
||||
" HumanMessage(content=\"I miss you all too. The Dursleys are being their usual unpleasant selves but I'm getting by. At least I can practice some spells in my room without them knowing. Let me know if you find anything good in your researching!\", additional_kwargs={'sender': 'Harry Potter'}, example=False)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_session = loader.load()[0]\n",
|
||||
"chat_session[\"messages\"][:3]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "8a3ee473",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = FolderFacebookMessengerChatLoader(\n",
|
||||
" path=\"./hogwarts\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "9f41e122",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"9"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chat_sessions = loader.load()\n",
|
||||
"len(chat_sessions)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d4aa3580-adc1-4b48-9bba-0e8e8d9f44ce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Prepare for fine-tuning\n",
|
||||
"\n",
|
||||
"Calling `load()` returns all the chat messages we could extract as human messages. When conversing with chat bots, conversations typically follow a more strict alternating dialogue pattern relative to real conversations. \n",
|
||||
"\n",
|
||||
"You can choose to merge message \"runs\" (consecutive messages from the same sender) and select a sender to represent the \"AI\". The fine-tuned LLM will learn to generate these AI messages."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "5a78030d-b757-4bbe-8a6c-841056f46df7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.utils import (\n",
|
||||
" merge_chat_runs,\n",
|
||||
" map_ai_messages,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "ff35b028-78bf-4c5b-9ec6-939fe67de7f7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"merged_sessions = merge_chat_runs(chat_sessions)\n",
|
||||
"alternating_sessions = list(map_ai_messages(merged_sessions, \"Harry Potter\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "4b11906e-a496-4d01-9f0d-1938c14147bf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Professor Snape, I was hoping I could speak with you for a moment about something that's been concerning me lately.\", additional_kwargs={'sender': 'Harry Potter'}, example=False),\n",
|
||||
" HumanMessage(content=\"What is it, Potter? I'm quite busy at the moment.\", additional_kwargs={'sender': 'Severus Snape'}, example=False),\n",
|
||||
" AIMessage(content=\"I apologize for the interruption, sir. I'll be brief. I've noticed some strange activity around the school grounds at night. I saw a cloaked figure lurking near the Forbidden Forest last night. I'm worried someone may be plotting something sinister.\", additional_kwargs={'sender': 'Harry Potter'}, example=False)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Now all of Harry Potter's messages will take the AI message class\n",
|
||||
"# which maps to the 'assistant' role in OpenAI's training format\n",
|
||||
"alternating_sessions[0]['messages'][:3]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d985478d-062e-47b9-ae9a-102f59be07c0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Now we can convert to OpenAI format dictionaries"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "21372331",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.adapters.openai import convert_messages_for_finetuning"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "92c5ae7a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prepared 9 dialogues for training\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"training_data = convert_messages_for_finetuning(alternating_sessions)\n",
|
||||
"print(f\"Prepared {len(training_data)} dialogues for training\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"id": "dfcbd181",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'role': 'assistant',\n",
|
||||
" 'content': \"Professor Snape, I was hoping I could speak with you for a moment about something that's been concerning me lately.\"},\n",
|
||||
" {'role': 'user',\n",
|
||||
" 'content': \"What is it, Potter? I'm quite busy at the moment.\"},\n",
|
||||
" {'role': 'assistant',\n",
|
||||
" 'content': \"I apologize for the interruption, sir. I'll be brief. I've noticed some strange activity around the school grounds at night. I saw a cloaked figure lurking near the Forbidden Forest last night. I'm worried someone may be plotting something sinister.\"}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 33,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"training_data[0][:3]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f1a9fd64-4f9f-42d3-b5dc-2a340e51e9e7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"OpenAI currently requires at least 10 training examples for a fine-tuning job, though they recommend between 50-100 for most tasks. Since we only have 9 chat sessions, we can subdivide them (optionally with some overlap) so that each training example is comprised of a portion of a whole conversation.\n",
|
||||
"\n",
|
||||
"Facebook chat sessions (1 per person) often span multiple days and conversations,\n",
|
||||
"so the long-range dependencies may not be that important to model anyhow."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"id": "13cd290a-b1e9-4686-bb5e-d99de8b8612b",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"100"
|
||||
]
|
||||
},
|
||||
"execution_count": 42,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Our chat is alternating, we will make each datapoint a group of 8 messages,\n",
|
||||
"# with 2 messages overlapping\n",
|
||||
"chunk_size = 8\n",
|
||||
"overlap = 2\n",
|
||||
"\n",
|
||||
"training_examples = [\n",
|
||||
" conversation_messages[i: i + chunk_size] \n",
|
||||
" for conversation_messages in training_data\n",
|
||||
" for i in range(\n",
|
||||
" 0, len(conversation_messages) - chunk_size + 1, \n",
|
||||
" chunk_size - overlap)\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"len(training_examples)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cc8baf41-ff07-4492-96bd-b2472ee7cef9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4. Fine-tune the model\n",
|
||||
"\n",
|
||||
"It's time to fine-tune the model. Make sure you have `openai` installed\n",
|
||||
"and have set your `OPENAI_API_KEY` appropriately"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "95ce3f63-3c80-44b2-9060-534ad74e16fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# %pip install -U openai --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "ab9e28eb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"File file-zCyNBeg4snpbBL7VkvsuhCz8 ready afer 30.55 seconds.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"from io import BytesIO\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"import openai\n",
|
||||
"\n",
|
||||
"# We will write the jsonl file in memory\n",
|
||||
"my_file = BytesIO()\n",
|
||||
"for m in training_examples:\n",
|
||||
" my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode('utf-8'))\n",
|
||||
"\n",
|
||||
"my_file.seek(0)\n",
|
||||
"training_file = openai.File.create(\n",
|
||||
" file=my_file,\n",
|
||||
" purpose='fine-tune'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# OpenAI audits each training file for compliance reasons.\n",
|
||||
"# This make take a few minutes\n",
|
||||
"status = openai.File.retrieve(training_file.id).status\n",
|
||||
"start_time = time.time()\n",
|
||||
"while status != \"processed\":\n",
|
||||
" print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
|
||||
" time.sleep(5)\n",
|
||||
" status = openai.File.retrieve(training_file.id).status\n",
|
||||
"print(f\"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "759a7f51-fde9-4b75-aaa9-e600e6537bd1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With the file ready, it's time to kick off a training job."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "3f451425",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"job = openai.FineTuningJob.create(\n",
|
||||
" training_file=training_file.id,\n",
|
||||
" model=\"gpt-3.5-turbo\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "489b23ef-5e14-42a9-bafb-44220ec6960b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Grab a cup of tea while your model is being prepared. This may take some time!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 60,
|
||||
"id": "bac1637a-c087-4523-ade1-c47f9bf4c6f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Status=[running]... 908.87s\r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"status = openai.FineTuningJob.retrieve(job.id).status\n",
|
||||
"start_time = time.time()\n",
|
||||
"while status != \"succeeded\":\n",
|
||||
" print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
|
||||
" time.sleep(5)\n",
|
||||
" job = openai.FineTuningJob.retrieve(job.id)\n",
|
||||
" status = job.status"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 66,
|
||||
"id": "535895e1-bc69-40e5-82ed-e24ed2baeeee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ft:gpt-3.5-turbo-0613:personal::7rDwkaOq\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(job.fine_tuned_model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "502ff73b-f9e9-49ce-ba45-401811e57946",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5. Use in LangChain\n",
|
||||
"\n",
|
||||
"You can use the resulting model ID directly the `ChatOpenAI` model class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 67,
|
||||
"id": "3925d60d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(\n",
|
||||
" model=job.fine_tuned_model,\n",
|
||||
" temperature=1,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 69,
|
||||
"id": "7190cf2e-ab34-4ceb-bdad-45f24f069c29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | model | StrOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 72,
|
||||
"id": "f02057e9-f914-40b1-9c9d-9432ff594b98",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The usual - Potions, Transfiguration, Defense Against the Dark Arts. What about you?"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for tok in chain.stream({\"input\": \"What classes are you taking?\"}):\n",
|
||||
" print(tok, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "35331503-3cc6-4d64-955e-64afe6b5fef3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
179
docs/extras/integrations/chat_loaders/gmail.ipynb
Normal file
@@ -0,0 +1,179 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3d1705d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# GMail\n",
|
||||
"\n",
|
||||
"This loader goes over how to load data from GMail. There are many ways you could want to load data from GMail. This loader is currently fairly opionated in how to do so. The way it does it is it first looks for all messages that you have sent. It then looks for messages where you are responding to a previous email. It then fetches that previous email, and creates a training example of that email, followed by your email.\n",
|
||||
"\n",
|
||||
"Note that there are clear limitations here. For example, all examples created are only looking at the previous email for context.\n",
|
||||
"\n",
|
||||
"To use:\n",
|
||||
"\n",
|
||||
"- Set up a Google Developer Account: Go to the Google Developer Console, create a project, and enable the Gmail API for that project. This will give you a credentials.json file that you'll need later.\n",
|
||||
"\n",
|
||||
"- Install the Google Client Library: Run the following command to install the Google Client Library:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "84578039",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install --upgrade google-auth google-auth-oauthlib google-auth-httplib2 google-api-python-client"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "be18f796",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os.path\n",
|
||||
"import base64\n",
|
||||
"import json\n",
|
||||
"import re\n",
|
||||
"import time\n",
|
||||
"from google.auth.transport.requests import Request\n",
|
||||
"from google.oauth2.credentials import Credentials\n",
|
||||
"from google_auth_oauthlib.flow import InstalledAppFlow\n",
|
||||
"from googleapiclient.discovery import build\n",
|
||||
"import logging\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"creds = None\n",
|
||||
"# The file token.json stores the user's access and refresh tokens, and is\n",
|
||||
"# created automatically when the authorization flow completes for the first\n",
|
||||
"# time.\n",
|
||||
"if os.path.exists('email_token.json'):\n",
|
||||
" creds = Credentials.from_authorized_user_file('email_token.json', SCOPES)\n",
|
||||
"# If there are no (valid) credentials available, let the user log in.\n",
|
||||
"if not creds or not creds.valid:\n",
|
||||
" if creds and creds.expired and creds.refresh_token:\n",
|
||||
" creds.refresh(Request())\n",
|
||||
" else:\n",
|
||||
" flow = InstalledAppFlow.from_client_secrets_file( \n",
|
||||
" # your creds file here. Please create json file as here https://cloud.google.com/docs/authentication/getting-started\n",
|
||||
" 'creds.json', SCOPES)\n",
|
||||
" creds = flow.run_local_server(port=0)\n",
|
||||
" # Save the credentials for the next run\n",
|
||||
" with open('email_token.json', 'w') as token:\n",
|
||||
" token.write(creds.to_json())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "a2793ba0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.gmail import GMailLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "2154597f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GMailLoader(creds=creds, n=3)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "0b7d11bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "74764bc7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"2"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Sometimes there can be errors which we silently ignore\n",
|
||||
"len(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "d9360a85",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "a9646f7a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# This makes messages sent by hchase@langchain.com the AI Messages\n",
|
||||
"# This means you will train an LLM to predict as if it's responding as hchase\n",
|
||||
"training_data = list(map_ai_messages(data, sender=\"Harrison Chase <hchase@langchain.com>\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d1a182f0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
420
docs/extras/integrations/chat_loaders/imessage.ipynb
Normal file
@@ -0,0 +1,420 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "01fcfa2f-33a9-48f3-835a-b1956c394d6b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# iMessage\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the iMessage chat loader. This class helps convert iMessage conversations to LangChain chat messages.\n",
|
||||
"\n",
|
||||
"On MacOS, iMessage stores conversations in a sqlite database at `~/Library/Messages/chat.db` (at least for macOS Ventura 13.4). \n",
|
||||
"The `IMessageChatLoader` loads from this database file. \n",
|
||||
"\n",
|
||||
"1. Create the `IMessageChatLoader` with the file path pointed to `chat.db` database you'd like to process.\n",
|
||||
"2. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion. Optionally use `merge_chat_runs` to combine message from the same sender in sequence, and/or `map_ai_messages` to convert messages from the specified sender to the \"AIMessage\" class.\n",
|
||||
"\n",
|
||||
"## 1. Access Chat DB\n",
|
||||
"\n",
|
||||
"It's likely that your terminal is denied access to `~/Library/Messages`. To use this class, you can copy the DB to an accessible directory (e.g., Documents) and load from there. Alternatively (and not recommended), you can grant full disk access for your terminal emulator in System Settings > Securityand Privacy > Full Disk Access.\n",
|
||||
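"\n",
"For example, a minimal sketch of copying the database into your working directory (assuming the default macOS location and that the copying process is allowed to read it) might look like this:\n",
"\n",
"```python\n",
"import shutil\n",
"from pathlib import Path\n",
"\n",
"# Copy the iMessage database to a location the loader can read\n",
"source = Path.home() / \"Library\" / \"Messages\" / \"chat.db\"\n",
"shutil.copy(source, \"./chat.db\")\n",
"```\n",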
"\n",
|
||||
"We have created an example database you can use at [this linked drive file](https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "036ce7e0-a38f-4cbe-89a6-a205ae7c23be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"File chat.db downloaded.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# This uses some example data\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"def download_drive_file(url: str, output_path: str = 'chat.db') -> None:\n",
|
||||
" file_id = url.split('/')[-2]\n",
|
||||
" download_url = f'https://drive.google.com/uc?export=download&id={file_id}'\n",
|
||||
"\n",
|
||||
" response = requests.get(download_url)\n",
|
||||
" if response.status_code != 200:\n",
|
||||
" print('Failed to download the file.')\n",
|
||||
" return\n",
|
||||
"\n",
|
||||
" with open(output_path, 'wb') as file:\n",
|
||||
" file.write(response.content)\n",
|
||||
" print(f'File {output_path} downloaded.')\n",
|
||||
"\n",
|
||||
"url = 'https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing'\n",
|
||||
"\n",
|
||||
"# Download file to chat.db\n",
|
||||
"download_drive_file(url)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cf60f703-76f1-4602-a723-02c59535c1af",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Create the Chat Loader\n",
|
||||
"\n",
|
||||
"Provide the loader with the file path to the zip directory. You can optionally specify the user id that maps to an ai message as well an configure whether to merge message runs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4b8b432a-d2bc-49e1-b35f-761730a8fd6d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.imessage import IMessageChatLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8ec6661b-0aca-48ae-9e2b-6412856c287b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = IMessageChatLoader(\n",
|
||||
" path=\"./chat.db\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8805a7c5-84b4-49f5-8989-0022f2054ace",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Load messages\n",
|
||||
"\n",
|
||||
"The `load()` (or `lazy_load`) methods return a list of \"ChatSessions\" that currently just contain a list of messages per loaded conversation. All messages are mapped to \"HumanMessage\" objects to start. \n",
|
||||
"\n",
|
||||
"You can optionally choose to merge message \"runs\" (consecutive messages from the same sender) and select a sender to represent the \"AI\". The fine-tuned LLM will learn to generate these AI messages."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "fcd69b3e-020d-4a15-8a0d-61c2d34e1ee1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain.chat_loaders.base import ChatSession\n",
|
||||
"from langchain.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
"merged_messages = merge_chat_runs(raw_messages)\n",
|
||||
"# Convert messages from \"Tortoise\" to AI messages. Do you have a guess who these conversations are between?\n",
|
||||
"chat_sessions: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"Tortoise\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "370b8c26-c7a8-434c-a225-45c20ff14a03",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[AIMessage(content=\"Slow and steady, that's my motto.\", additional_kwargs={'message_time': 1693182723, 'sender': 'Tortoise'}, example=False),\n",
|
||||
" HumanMessage(content='Speed is key!', additional_kwargs={'message_time': 1693182753, 'sender': 'Hare'}, example=False),\n",
|
||||
" AIMessage(content='A balanced approach is more reliable.', additional_kwargs={'message_time': 1693182783, 'sender': 'Tortoise'}, example=False)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Now all of the Tortoise's messages will take the AI message class\n",
|
||||
"# which maps to the 'assistant' role in OpenAI's training format\n",
|
||||
"alternating_sessions[0]['messages'][:3]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "05208f9d-3193-4a8d-86a5-13df2c8197e5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Prepare for fine-tuning\n",
|
||||
"\n",
|
||||
"Now it's time to convert our chat messages to OpenAI dictionaries. We can use the `convert_messages_for_finetuning` utility to do so."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "8834861f-f37f-4c08-96c6-917269bf09b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.adapters.openai import convert_messages_for_finetuning"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "ce7ab0f9-6e6a-4a1c-8b86-c635251d437e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prepared 10 dialogues for training\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"training_data = convert_messages_for_finetuning(alternating_sessions)\n",
|
||||
"print(f\"Prepared {len(training_data)} dialogues for training\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b494d64c-8056-42ae-b4c1-a9cfabc002ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 4. Fine-tune the model\n",
|
||||
"\n",
|
||||
"It's time to fine-tune the model. Make sure you have `openai` installed\n",
|
||||
"and have set your `OPENAI_API_KEY` appropriately"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "b4b60daa-b899-4291-a09a-412ce9c218fc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# %pip install -U openai --quiet"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "2cca6c95-c0d6-4826-b4fa-1c403f217f93",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"File file-zHIgf4r8LltZG3RFpkGd4Sjf ready after 10.19 seconds.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"from io import BytesIO\n",
|
||||
"import time\n",
|
||||
"\n",
|
||||
"import openai\n",
|
||||
"\n",
|
||||
"# We will write the jsonl file in memory\n",
|
||||
"my_file = BytesIO()\n",
|
||||
"for m in training_data:\n",
|
||||
" my_file.write((json.dumps({\"messages\": m}) + \"\\n\").encode('utf-8'))\n",
|
||||
"\n",
|
||||
"my_file.seek(0)\n",
|
||||
"training_file = openai.File.create(\n",
|
||||
" file=my_file,\n",
|
||||
" purpose='fine-tune'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# OpenAI audits each training file for compliance reasons.\n",
|
||||
"# This make take a few minutes\n",
|
||||
"status = openai.File.retrieve(training_file.id).status\n",
|
||||
"start_time = time.time()\n",
|
||||
"while status != \"processed\":\n",
|
||||
" print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
|
||||
" time.sleep(5)\n",
|
||||
" status = openai.File.retrieve(training_file.id).status\n",
|
||||
"print(f\"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "60ee0476-3113-4dc8-a886-bce878c60b07",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"With the file ready, it's time to kick off a training job."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "c376ddca-5b4f-4e5a-bf4e-6beeb467eacc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"job = openai.FineTuningJob.create(\n",
|
||||
" training_file=training_file.id,\n",
|
||||
" model=\"gpt-3.5-turbo\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "09344c60-0bee-4989-b8d1-4a8821553cc3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Grab a cup of tea while your model is being prepared. This may take some time!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "22eae900-04ca-456b-ba51-1dfff1f8e0e1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Status=[running]... 524.95s\r"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"status = openai.FineTuningJob.retrieve(job.id).status\n",
|
||||
"start_time = time.time()\n",
|
||||
"while status != \"succeeded\":\n",
|
||||
" print(f\"Status=[{status}]... {time.time() - start_time:.2f}s\", end=\"\\r\", flush=True)\n",
|
||||
" time.sleep(5)\n",
|
||||
" job = openai.FineTuningJob.retrieve(job.id)\n",
|
||||
" status = job.status"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "39e72616-a7d9-44b8-a4eb-506611d119f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"ft:gpt-3.5-turbo-0613:personal::7sKoRdlz\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(job.fine_tuned_model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0d717749-b1b6-451f-b3c5-3286b82d45b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5. Use in LangChain\n",
|
||||
"\n",
|
||||
"You can use the resulting model ID directly the `ChatOpenAI` model class."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "1579dfca-95c6-47b7-8549-1195b9dce5b0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(\n",
|
||||
" model=job.fine_tuned_model,\n",
|
||||
" temperature=1,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "6f53d1b1-dcbf-4976-a61a-17f74c6f1b0a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
" (\"system\", \"You are speaking to hare.\"),\n",
|
||||
" (\"human\", \"{input}\"),\n",
|
||||
" ]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"chain = prompt | model | StrOutputParser()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "6619c9bc-54ea-4136-bd9a-44557f7da724",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"A symbol of interconnectedness."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for tok in chain.stream({\"input\": \"What's the golden thread?\"}):\n",
|
||||
" print(tok, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "88e0d1a1-48a9-4d9d-9f4e-010cbbb65af8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
188
docs/extras/integrations/chat_loaders/index.mdx
Normal file
@@ -0,0 +1,188 @@
|
||||
---
|
||||
sidebar_position: 0
|
||||
---
|
||||
|
||||
# Chat loaders
|
||||
|
||||
Like document loaders, chat loaders are utilities designed to help load conversations from popular communication platforms such as Facebook, Slack, Discord, etc. These are loaded into memory as LangChain chat message objects. Such utilities facilitate tasks such as fine-tuning a language model to match your personal style or voice.
|
||||
|
||||
This brief guide illustrates the process using [OpenAI's fine-tuning API](https://platform.openai.com/docs/guides/fine-tuning), and comprises six steps:
|
||||
|
||||
1. Export your Facebook Messenger chat data in a compatible format for your intended chat loader.
|
||||
2. Load the chat data into memory as LangChain chat message objects. (_this is what is covered in each integration notebook in this section of the documentation_).
|
||||
- Assign a person to the "AI" role and optionally filter, group, and merge messages.
|
||||
3. Export these acquired messages in a format expected by the fine-tuning API.
|
||||
4. Upload this data to OpenAI.
|
||||
5. Fine-tune your model.
|
||||
6. Implement the fine-tuned model in LangChain.
|
||||
|
||||
This guide is not wholly comprehensive but is designed to take you through the fundamentals of going from raw data to fine-tuned model.
|
||||
|
||||
We will demonstrate the procedure through an example of fine-tuning a `gpt-3.5-turbo` model on Facebook Messenger data.
|
||||
|
||||
### 1. Export your chat data
|
||||
|
||||
To export your Facebook messenger data, you can follow the [instructions here](https://www.zapptales.com/en/download-facebook-messenger-chat-history-how-to/).
|
||||
|
||||
:::important JSON format
|
||||
You must select "JSON format" (instead of HTML) when exporting your data to be compatible with the current loader.
|
||||
:::
|
||||
|
||||
OpenAI requires at least 10 examples to fine-tune your model, but they recommend between 50 and 100 for best results.
|
||||
You can use the example data stored at [this google drive link](https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing) to test the process.
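
If you would rather fetch the example dump in code than by hand, a minimal sketch (mirroring the download helper used in the Facebook Messenger notebook, and assuming the drive link stays publicly accessible) might look like this:

```python
import zipfile

import requests

# Convert the shared Google Drive link into a direct-download URL
share_url = "https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing"
file_id = share_url.split("/")[-2]
download_url = f"https://drive.google.com/uc?export=download&id={file_id}"

response = requests.get(download_url)
response.raise_for_status()

with open("file.zip", "wb") as f:
    f.write(response.content)

# Extract the chat dump; point the loader below at the extracted folder
with zipfile.ZipFile("file.zip", "r") as zip_ref:
    zip_ref.extractall()
```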
|
||||
|
||||
### 2. Load the chat
|
||||
|
||||
Once you've obtained your chat data, you can load it into memory as LangChain chat message objects. Here’s an example of loading the data in Python:
|
||||
|
||||
```python
|
||||
from langchain.chat_loaders.facebook_messenger import FolderFacebookMessengerChatLoader
|
||||
|
||||
loader = FolderFacebookMessengerChatLoader(
|
||||
path="./facebook_messenger_chats",
|
||||
)
|
||||
|
||||
chat_sessions = loader.load()
|
||||
```
|
||||
|
||||
In this snippet, we point the loader to a directory of Facebook chat dumps which are then loaded as multiple "sessions" of messages, one session per conversation file.
|
||||
|
||||
Once you've loaded the messages, you should decide which person you want the fine-tuned model to emulate (usually yourself). You can also decide to merge consecutive messages from the same sender into a single chat message.
|
||||
For both of these tasks, you can use the `chat_loaders` utilities:
|
||||
|
||||
```python
|
||||
from langchain.chat_loaders.utils import (
|
||||
merge_chat_runs,
|
||||
map_ai_messages,
|
||||
)
|
||||
|
||||
merged_sessions = merge_chat_runs(chat_sessions)
|
||||
alternating_sessions = list(map_ai_messages(merged_sessions, "My Name"))
|
||||
```
|
||||
|
||||
### 3. Export messages to OpenAI format
|
||||
|
||||
Convert the chat messages to dictionaries using the `convert_messages_for_finetuning` function. Then, group the data into chunks for better context modeling and overlap management.
|
||||
|
||||
```python
|
||||
from langchain.adapters.openai import convert_messages_for_finetuning
|
||||
|
||||
openai_messages = convert_messages_for_finetuning(chat_sessions)
|
||||
```
|
||||
|
||||
At this point, the data is ready for upload to OpenAI. You can choose to split up conversations into smaller chunks for training if you
|
||||
do not have enough conversations to train on. Feel free to play around with different chunk sizes or with adding system messages to the fine-tuning data.
|
||||
|
||||
```python
|
||||
chunk_size = 8
|
||||
overlap = 2
|
||||
|
||||
message_groups = [
|
||||
conversation_messages[i: i + chunk_size]
|
||||
for conversation_messages in openai_messages
|
||||
for i in range(
|
||||
0, len(conversation_messages) - chunk_size + 1,
|
||||
chunk_size - overlap)
|
||||
]
|
||||
|
||||
len(message_groups)
|
||||
# 9
|
||||
```
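
If you want to experiment with adding system messages, one lightweight approach (a sketch, not a requirement; the prompt text and placeholder name are up to you) is to prepend a system dictionary to each group before upload:

```python
# Hypothetical system prompt; replace "My Name" with the sender you mapped to the AI role
system_message = {"role": "system", "content": "You are chatting as My Name."}

message_groups = [[system_message] + group for group in message_groups]
```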
|
||||
|
||||
### 4. Upload the data to OpenAI
|
||||
|
||||
Ensure you have set your OpenAI API key by following these [instructions](https://platform.openai.com/account/api-keys), then upload the training file.
|
||||
An audit is performed to ensure data compliance, so you may have to wait a few minutes for the dataset to become ready for use.
|
||||
|
||||
```python
|
||||
import time
|
||||
import json
|
||||
import io
|
||||
|
||||
import openai
|
||||
|
||||
my_file = io.BytesIO()
|
||||
for group in message_groups:
|
||||
my_file.write((json.dumps({"messages": group}) + "\n").encode('utf-8'))
|
||||
|
||||
my_file.seek(0)
|
||||
training_file = openai.File.create(
|
||||
file=my_file,
|
||||
purpose='fine-tune'
|
||||
)
|
||||
|
||||
# Wait while the file is processed
|
||||
status = openai.File.retrieve(training_file.id).status
|
||||
start_time = time.time()
|
||||
while status != "processed":
|
||||
print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True)
|
||||
time.sleep(5)
|
||||
status = openai.File.retrieve(training_file.id).status
|
||||
print(f"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.")
|
||||
```
|
||||
|
||||
Once this is done, you can proceed to the model training!
|
||||
|
||||
### 5. Fine-tune the model
|
||||
|
||||
Start the fine-tuning job with your chosen base model.
|
||||
|
||||
```python
|
||||
job = openai.FineTuningJob.create(
|
||||
training_file=training_file.id,
|
||||
model="gpt-3.5-turbo",
|
||||
)
|
||||
```
|
||||
|
||||
This might take a while. Check the status with `openai.FineTuningJob.retrieve(job.id).status` and wait for it to report `succeeded`.
|
||||
|
||||
```python
|
||||
# It may take 10-20+ minutes to complete training.
|
||||
status = openai.FineTuningJob.retrieve(job.id).status
|
||||
start_time = time.time()
|
||||
while status != "succeeded":
|
||||
print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True)
|
||||
time.sleep(5)
|
||||
job = openai.FineTuningJob.retrieve(job.id)
|
||||
status = job.status
|
||||
```

### 6. Use the model in LangChain

You're almost there! Use the fine-tuned model in LangChain.

```python
from langchain import chat_models

model_name = job.fine_tuned_model
# Example: ft:gpt-3.5-turbo-0613:personal::5mty86jblapsed
model = chat_models.ChatOpenAI(model=model_name)
```

```python
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

prompt = ChatPromptTemplate.from_messages(
    [
        ("human", "{input}"),
    ]
)

chain = prompt | model | StrOutputParser()

for tok in chain.stream({"input": "What classes are you taking?"}):
    print(tok, end="", flush=True)

# The usual - Potions, Transfiguration, Defense Against the Dark Arts. What about you?
```
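If you prefer a single response instead of streamed tokens, the same chain also supports `invoke`:

```python
# Non-streaming call; returns the chain's full string output.
print(chain.invoke({"input": "What classes are you taking?"}))
```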

And that's it! You've successfully fine-tuned a model and used it in LangChain.

## Supported Chat Loaders

LangChain currently supports the following chat loaders. Feel free to contribute more!

import DocCardList from "@theme/DocCardList";

<DocCardList />
docs/extras/integrations/chat_loaders/slack.ipynb (new file, 163 lines added)
@@ -0,0 +1,163 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "01fcfa2f-33a9-48f3-835a-b1956c394d6b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Slack\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the Slack chat loader. This class helps map exported slack conversations to LangChain chat messages.\n",
|
||||
"\n",
|
||||
"The process has three steps:\n",
|
||||
"1. Export the desired conversation thread by following the [instructions here](https://slack.com/help/articles/1500001548241-Request-to-export-all-conversations).\n",
|
||||
"2. Create the `SlackChatLoader` with the file path pointed to the json file or directory of JSON files\n",
|
||||
"3. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion. Optionally use `merge_chat_runs` to combine message from the same sender in sequence, and/or `map_ai_messages` to convert messages from the specified sender to the \"AIMessage\" class.\n",
|
||||
"\n",
|
||||
"## 1. Creat message dump\n",
|
||||
"\n",
|
||||
"Currently (2023/08/23) this loader best supports a zip directory of files in the format generated by exporting your a direct message converstion from Slack. Follow up-to-date instructions from slack on how to do so.\n",
|
||||
"\n",
|
||||
"We have an example in the LangChain repo."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a79d35bf-5f21-4063-84bf-a60845c1c51f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"permalink = \"https://raw.githubusercontent.com/langchain-ai/langchain/342087bdfa3ac31d622385d0f2d09cf5e06c8db3/libs/langchain/tests/integration_tests/examples/slack_export.zip\"\n",
|
||||
"response = requests.get(permalink)\n",
|
||||
"with open(\"slack_dump.zip\", \"wb\") as f:\n",
|
||||
" f.write(response.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cf60f703-76f1-4602-a723-02c59535c1af",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Create the Chat Loader\n",
|
||||
"\n",
|
||||
"Provide the loader with the file path to the zip directory. You can optionally specify the user id that maps to an ai message as well an configure whether to merge message runs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4b8b432a-d2bc-49e1-b35f-761730a8fd6d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.slack import SlackChatLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "8ec6661b-0aca-48ae-9e2b-6412856c287b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = SlackChatLoader(\n",
|
||||
" path=\"slack_dump.zip\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8805a7c5-84b4-49f5-8989-0022f2054ace",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Load messages\n",
|
||||
"\n",
|
||||
"The `load()` (or `lazy_load`) methods return a list of \"ChatSessions\" that currently just contain a list of messages per loaded conversation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "fcd69b3e-020d-4a15-8a0d-61c2d34e1ee1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain.chat_loaders.base import ChatSession\n",
|
||||
"from langchain.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
"merged_messages = merge_chat_runs(raw_messages)\n",
|
||||
"# Convert messages from \"U0500003428\" to AI messages\n",
|
||||
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"U0500003428\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7d033f87-cd0c-4f44-a753-41b871c1e919",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Next Steps\n",
|
||||
"\n",
|
||||
"You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "7d8a1629-5d9e-49b3-b978-3add57027d59",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Hi, \n",
|
||||
"\n",
|
||||
"I hope you're doing well. I wanted to reach out and ask if you'd be available to meet up for coffee sometime next week. I'd love to catch up and hear about what's been going on in your life. Let me know if you're interested and we can find a time that works for both of us. \n",
|
||||
"\n",
|
||||
"Looking forward to hearing from you!\n",
|
||||
"\n",
|
||||
"Best, [Your Name]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"for chunk in llm.stream(messages[1]['messages']):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
docs/extras/integrations/chat_loaders/telegram.ipynb (new file, 206 lines added)
@@ -0,0 +1,206 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "735455a6-f82e-4252-b545-27385ef883f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Telegram\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the Telegram chat loader. This class helps map exported Telegram conversations to LangChain chat messages.\n",
|
||||
"\n",
|
||||
"The process has three steps:\n",
|
||||
"1. Export the chat .txt file by copying chats from the Discord app and pasting them in a file on your local computer\n",
|
||||
"2. Create the `TelegramChatLoader` with the file path pointed to the json file or directory of JSON files\n",
|
||||
"3. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion. Optionally use `merge_chat_runs` to combine message from the same sender in sequence, and/or `map_ai_messages` to convert messages from the specified sender to the \"AIMessage\" class.\n",
|
||||
"\n",
|
||||
"## 1. Creat message dump\n",
|
||||
"\n",
|
||||
"Currently (2023/08/23) this loader best supports json files in the format generated by exporting your chat history from the [Telegram Desktop App](https://desktop.telegram.org/).\n",
|
||||
"\n",
|
||||
"**Important:** There are 'lite' versions of telegram such as \"Telegram for MacOS\" that lack the export functionality. Please make sure you use the correct app to export the file.\n",
|
||||
"\n",
|
||||
"To make the export:\n",
|
||||
"1. Download and open telegram desktop\n",
|
||||
"2. Select a conversation\n",
|
||||
"3. Navigate to the conversation settings (currently the three dots in the top right corner)\n",
|
||||
"4. Click \"Export Chat History\"\n",
|
||||
"5. Unselect photos and other media. Select \"Machine-readable JSON\" format to export.\n",
|
||||
"\n",
|
||||
"An example is below: "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "285f2044-0f58-4b92-addb-9f8569076734",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Overwriting telegram_conversation.json\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%writefile telegram_conversation.json\n",
|
||||
"{\n",
|
||||
" \"name\": \"Jiminy\",\n",
|
||||
" \"type\": \"personal_chat\",\n",
|
||||
" \"id\": 5965280513,\n",
|
||||
" \"messages\": [\n",
|
||||
" {\n",
|
||||
" \"id\": 1,\n",
|
||||
" \"type\": \"message\",\n",
|
||||
" \"date\": \"2023-08-23T13:11:23\",\n",
|
||||
" \"date_unixtime\": \"1692821483\",\n",
|
||||
" \"from\": \"Jiminy Cricket\",\n",
|
||||
" \"from_id\": \"user123450513\",\n",
|
||||
" \"text\": \"You better trust your conscience\",\n",
|
||||
" \"text_entities\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"plain\",\n",
|
||||
" \"text\": \"You better trust your conscience\"\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"id\": 2,\n",
|
||||
" \"type\": \"message\",\n",
|
||||
" \"date\": \"2023-08-23T13:13:20\",\n",
|
||||
" \"date_unixtime\": \"1692821600\",\n",
|
||||
" \"from\": \"Batman & Robin\",\n",
|
||||
" \"from_id\": \"user6565661032\",\n",
|
||||
" \"text\": \"What did you just say?\",\n",
|
||||
" \"text_entities\": [\n",
|
||||
" {\n",
|
||||
" \"type\": \"plain\",\n",
|
||||
" \"text\": \"What did you just say?\"\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7cc109f4-4c92-4cd3-8143-c322776c3f03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Create the Chat Loader\n",
|
||||
"\n",
|
||||
"All that's required is the file path. You can optionally specify the user name that maps to an ai message as well an configure whether to merge message runs."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "111f7767-573c-42d4-86f0-bd766bbaa071",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.telegram import TelegramChatLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "a4226efa-2640-4990-a20c-6861d1887329",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TelegramChatLoader(\n",
|
||||
" path=\"./telegram_conversation.json\", \n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "71699fb7-7815-4c89-8d96-30e8fada6923",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Load messages\n",
|
||||
"\n",
|
||||
"The `load()` (or `lazy_load`) methods return a list of \"ChatSessions\" that currently just contain a list of messages per loaded conversation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "81121efb-c875-4a77-ad1e-fe26b3d7e812",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain.chat_loaders.base import ChatSession\n",
|
||||
"from langchain.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
"merged_messages = merge_chat_runs(raw_messages)\n",
|
||||
"# Convert messages from \"Jiminy Cricket\" to AI messages\n",
|
||||
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"Jiminy Cricket\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b9089c05-7375-41ca-a2f9-672a845314e4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Next Steps\n",
|
||||
"\n",
|
||||
"You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "637a6f5d-6944-4722-9361-a76ef5e9dd2a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"I said, \"You better trust your conscience.\""
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"for chunk in llm.stream(messages[0]['messages']):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
docs/extras/integrations/chat_loaders/twitter.ipynb (new file, 77 lines added)
@@ -0,0 +1,77 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d86853d2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Twitter (via Apify)\n",
|
||||
"\n",
|
||||
"This notebook shows how to load chat messages from Twitter to finetune on. We do this by utilizing Apify. \n",
|
||||
"\n",
|
||||
"First, use Apify to export tweets. An example"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e5034b4e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"from langchain.schema import AIMessage\n",
|
||||
"from langchain.adapters.openai import convert_message_to_dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8bf0fb93",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"with open('example_data/dataset_twitter-scraper_2023-08-23_22-13-19-740.json') as f:\n",
|
||||
" data = json.load(f)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "468124fa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Filter out tweets that reference other tweets, because it's a bit weird\n",
|
||||
"tweets = [d[\"full_text\"] for d in data if \"t.co\" not in d['full_text']]\n",
|
||||
"# Create them as AI messages\n",
|
||||
"messages = [AIMessage(content=t) for t in tweets]\n",
|
||||
"# Add in a system message at the start\n",
|
||||
"# TODO: we could try to extract the subject from the tweets, and put that in the system message.\n",
|
||||
"system_message = {\"role\": \"system\", \"content\": \"write a tweet\"}\n",
|
||||
"data = [[system_message, convert_message_to_dict(m)] for m in messages]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
docs/extras/integrations/chat_loaders/whatsapp.ipynb (new file, 204 lines added)
@@ -0,0 +1,204 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "735455a6-f82e-4252-b545-27385ef883f4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# WhatsApp\n",
|
||||
"\n",
|
||||
"This notebook shows how to use the WhatsApp chat loader. This class helps map exported Telegram conversations to LangChain chat messages.\n",
|
||||
"\n",
|
||||
"The process has three steps:\n",
|
||||
"1. Export the chat conversations to computer\n",
|
||||
"2. Create the `WhatsAppChatLoader` with the file path pointed to the json file or directory of JSON files\n",
|
||||
"3. Call `loader.load()` (or `loader.lazy_load()`) to perform the conversion.\n",
|
||||
"\n",
|
||||
"## 1. Creat message dump\n",
|
||||
"\n",
|
||||
"To make the export of your WhatsApp conversation(s), complete the following steps:\n",
|
||||
"\n",
|
||||
"1. Open the target conversation\n",
|
||||
"2. Click the three dots in the top right corner and select \"More\".\n",
|
||||
"3. Then select \"Export chat\" and choose \"Without media\".\n",
|
||||
"\n",
|
||||
"An example of the data format for each converation is below: "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "285f2044-0f58-4b92-addb-9f8569076734",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Writing whatsapp_chat.txt\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%%writefile whatsapp_chat.txt\n",
|
||||
"[8/15/23, 9:12:33 AM] Dr. Feather: Messages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.\n",
|
||||
"[8/15/23, 9:12:43 AM] Dr. Feather: I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest. Such a magnificent creature!\n",
|
||||
"[8/15/23, 9:12:48 AM] Dr. Feather: image omitted\n",
|
||||
"[8/15/23, 9:13:15 AM] Jungle Jane: That's stunning! Were you able to observe its behavior?\n",
|
||||
"[8/15/23, 9:13:23 AM] Dr. Feather: image omitted\n",
|
||||
"[8/15/23, 9:14:02 AM] Dr. Feather: Yes, it seemed quite social with other macaws. They're known for their playful nature.\n",
|
||||
"[8/15/23, 9:14:15 AM] Jungle Jane: How's the research going on parrot communication?\n",
|
||||
"[8/15/23, 9:14:30 AM] Dr. Feather: image omitted\n",
|
||||
"[8/15/23, 9:14:50 AM] Dr. Feather: It's progressing well. We're learning so much about how they use sound and color to communicate.\n",
|
||||
"[8/15/23, 9:15:10 AM] Jungle Jane: That's fascinating! Can't wait to read your paper on it.\n",
|
||||
"[8/15/23, 9:15:20 AM] Dr. Feather: Thank you! I'll send you a draft soon.\n",
|
||||
"[8/15/23, 9:25:16 PM] Jungle Jane: Looking forward to it! Keep up the great work."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7cc109f4-4c92-4cd3-8143-c322776c3f03",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 2. Create the Chat Loader\n",
|
||||
"\n",
|
||||
"The WhatsAppChatLoader accepts the resulting zip file, unzipped directory, or the path to any of the chat `.txt` files therein.\n",
|
||||
"\n",
|
||||
"Provide that as well as the user name you want to take on the role of \"AI\" when finetuning."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "111f7767-573c-42d4-86f0-bd766bbaa071",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chat_loaders.whatsapp import WhatsAppChatLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "a4226efa-2640-4990-a20c-6861d1887329",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = WhatsAppChatLoader(\n",
|
||||
" path=\"./whatsapp_chat.txt\", \n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "71699fb7-7815-4c89-8d96-30e8fada6923",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 3. Load messages\n",
|
||||
"\n",
|
||||
"The `load()` (or `lazy_load`) methods return a list of \"ChatSessions\" that currently store the list of messages per loaded conversation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "81121efb-c875-4a77-ad1e-fe26b3d7e812",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'messages': [AIMessage(content='I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest. Such a magnificent creature!', additional_kwargs={'sender': 'Dr. Feather', 'events': [{'message_time': '8/15/23, 9:12:43 AM'}]}, example=False),\n",
|
||||
" HumanMessage(content=\"That's stunning! Were you able to observe its behavior?\", additional_kwargs={'sender': 'Jungle Jane', 'events': [{'message_time': '8/15/23, 9:13:15 AM'}]}, example=False),\n",
|
||||
" AIMessage(content=\"Yes, it seemed quite social with other macaws. They're known for their playful nature.\", additional_kwargs={'sender': 'Dr. Feather', 'events': [{'message_time': '8/15/23, 9:14:02 AM'}]}, example=False),\n",
|
||||
" HumanMessage(content=\"How's the research going on parrot communication?\", additional_kwargs={'sender': 'Jungle Jane', 'events': [{'message_time': '8/15/23, 9:14:15 AM'}]}, example=False),\n",
|
||||
" AIMessage(content=\"It's progressing well. We're learning so much about how they use sound and color to communicate.\", additional_kwargs={'sender': 'Dr. Feather', 'events': [{'message_time': '8/15/23, 9:14:50 AM'}]}, example=False),\n",
|
||||
" HumanMessage(content=\"That's fascinating! Can't wait to read your paper on it.\", additional_kwargs={'sender': 'Jungle Jane', 'events': [{'message_time': '8/15/23, 9:15:10 AM'}]}, example=False),\n",
|
||||
" AIMessage(content=\"Thank you! I'll send you a draft soon.\", additional_kwargs={'sender': 'Dr. Feather', 'events': [{'message_time': '8/15/23, 9:15:20 AM'}]}, example=False),\n",
|
||||
" HumanMessage(content='Looking forward to it! Keep up the great work.', additional_kwargs={'sender': 'Jungle Jane', 'events': [{'message_time': '8/15/23, 9:25:16 PM'}]}, example=False)]}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain.chat_loaders.base import ChatSession\n",
|
||||
"from langchain.chat_loaders.utils import (\n",
|
||||
" map_ai_messages,\n",
|
||||
" merge_chat_runs,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"raw_messages = loader.lazy_load()\n",
|
||||
"# Merge consecutive messages from the same sender into a single message\n",
|
||||
"merged_messages = merge_chat_runs(raw_messages)\n",
|
||||
"# Convert messages from \"Dr. Feather\" to AI messages\n",
|
||||
"messages: List[ChatSession] = list(map_ai_messages(merged_messages, sender=\"Dr. Feather\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b9089c05-7375-41ca-a2f9-672a845314e4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Next Steps\n",
|
||||
"\n",
|
||||
"You can then use these messages how you see fit, such as finetuning a model, few-shot example selection, or directly make predictions for the next message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "637a6f5d-6944-4722-9361-a76ef5e9dd2a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Thank you for the encouragement! I'll do my best to continue studying and sharing fascinating insights about parrot communication."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"for chunk in llm.stream(messages[0]['messages']):\n",
|
||||
" print(chunk.content, end=\"\", flush=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "16156643-cfbd-444f-b4ae-198eb44f0267",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
Some files were not shown because too many files have changed in this diff.