Mirror of https://github.com/hwchase17/langchain.git, synced 2026-04-20 22:08:07 +00:00.

Compare commits: harrison/i ... wfh/gemini (222 commits)
Commit SHA1s:

92b0dd3d3a, b56bfd529b, 251fd93084, 0de98e7830, b46802ad88, 247950ebaa, 7051e83ae0, 76fb19f205,
d1a0140659, 9e745c8e91, 0de6e09659, 3e0c5a331c, 34709ec34f, 279155f02d, a2c447938c, f541e0235c,
89f74bf808, dee5fdfe2b, a7cbdccb8a, 91dd47b12a, a05230a4ba, 18aba7fdef, af2ad3bb4f, 60fc995c3b,
c3232a8aa3, 243465da3e, 32499164ab, 9a5a52dd99, deab168da5, d7793a0a66, 52052cc7b9, e4d6e55c5e,
eb209e7ee3, b2280fd874, 7186faefb2, 76f30f5297, f0304a8d9e, bbc795b752, 6a0a7a7f66, de690b081e,
cea3d61bf4, 5631e7e397, 54040b00a4, a66df25a89, 8226b81fb3, 280aec4e1d, 12039a0057, 85eae95764,
6b1af2d19c, fd2fe4aacf, d6403c6696, 11fc0a5004, ce884f801f, db6bf8b022, 75ade6116d, a7271cf5bd,
8ccc18fd73, 77c38df36c, 8f95a8206b, e5bd32ff6d, f62331a20c, f922f9fca3, c6c9d93283, e08017fcf7,
a2129713c3, cc76f0e834, ce4d81f88b, 960faa6ebe, 867ca6d0be, c4f32cb0e9, f56c0359dc, 7bdfc43766,
b9087e765d, 0dea8cc62d, 2aaf8e11e0, 38813d7090, ad6dfb6220, d4d64daa1e, 06e3316f54, 5efaedf488,
86b08d7753, 98c4f2a5fe, 31c9081246, f5ed74d56f, e1ea191237, a1a11ffd78, b05c46074b, 9e5d146409,
8f403ea2d7, 64d5108f99, ab6b41937a, 7c2ef06136, 20d2b4a6ba, 7205bfdd00, fd5be55a7b, c215a4c9ec,
85b88c33f3, 62b59048de, 5a23608c41, 63fdc6e818, 667ad6a5de, 9401539e43, 5535c7851a, d22c13ec48,
ee1478b6e1, 29e993a5f2, a74c03da3c, 66848871fc, 3b75d37cee, 8b0060184d, 0f02e94565, 6607cc6eab,
80637727ea, b2e756c0a8, 4a5a13aab3, bf7b59ec44, 7ad75edf8b, f758c8adc4, 77a15fa988, dcccf8fa66,
e0c03d6c44, ea0afd07ca, 5cb3393e20, 74c7b799ef, abbba6c7d8, 8eab4d95c0, 956d55de2b, b49104c2c9,
e042e5df35, fcc8e5e839, 2213fc9711, 0d47d15a9f, c51001f01e, 4351b99d2b, 4fb72ff76f, 4965f9a10e,
2d18c65ff7, e26906c1dc, ee9abb6722, 676a077c4e, 921c4b5597, 224aa5151d, 9f9cb71d26, f26d88ca60,
65faba91ad, aa8ae31e5b, 1750cc464d, a26c4a0930, 67662564f3, de86b84a70, 411aa9a41e, 5fea63327b,
e09b876863, f6d68d78f3, eecfa3f9e5, 805e9bfc24, 25f72944a0, cd2028288e, 0f02081392, aaabc1574f,
702a6d7044, 641e401ba8, 88d39708a9, e32185193e, 8504ec56e4, ca8a022cd9, 6826feea14, 6ce5dab38c,
50aee687c6, ee94ef55ee, 94bf733dae, 74d4154bcc, 246dc4f9cc, e961c57fd2, 092f302c0f, c660b0cf79,
16c83f786c, e6862e6e7d, e204657b3c, 2780d2d4dd, 9b59bde93d, 0de7cf898d, 7bc4c12477, 283c2994de,
8a0951d934, 32d4bb4590, 99e5ee6a84, 03d6b94c29, 3833882ab7, ac449f186b, 052e23be3e, 1ea48a31da,
62505043be, 9938086df0, 818252b1f8, 6ae0194dc7, 0bdb434383, 15c04a5670, bdb6ae2ed3, 41ee3be95f,
82102c99b3, fd781c89cc, 24385a00de, f7c257553d, 6d0209e0aa, 700428593a, 340b42d8ee, cbe4753e1a,
b01d9d27d9, 0caef3cde7, 96f6b90349, e3a7c96a8e, 8cf4cb9e48, b6d26d3f9f, 6eb40db353, 62a3473ac0,
7d5341dbd3, 1b36ddf16c, 1757258b2a, 32da0a4d71, 371bcb7580, ae646701c4
.github/CONTRIBUTING.md (vendored): 23 changes

@@ -72,9 +72,10 @@ tell Poetry to use the virtualenv python environment (`poetry config virtualenvs

### Core vs. Experimental

This repository contains two separate projects:
This repository contains three separate projects:
- `langchain`: core langchain code, abstractions, and use cases.
- `langchain.experimental`: see the [Experimental README](https://github.com/langchain-ai/langchain/tree/master/libs/experimental/README.md) for more information.
- `langchain_core`: contains interfaces for key abstractions as well as logic for combining them in chains (LCEL).
- `langchain_experimental`: see the [Experimental README](https://github.com/langchain-ai/langchain/tree/master/libs/experimental/README.md) for more information.

Each of these has its own development environment. Docs are run from the top-level makefile, but development
is split across separate test & release flows.

@@ -128,6 +129,24 @@ make docker_tests

There are also [integration tests and code-coverage](https://github.com/langchain-ai/langchain/tree/master/libs/langchain/tests/README.md) available.

### Only develop langchain_core or langchain_experimental

If you are only developing `langchain_core` or `langchain_experimental`, you can simply install the dependencies for the respective projects and run tests:

```bash
cd libs/core
poetry install --with test
make test
```

Or:

```bash
cd libs/experimental
poetry install --with test
make test
```

### Formatting and Linting

Run these locally before submitting a PR; the CI system will check also.
.github/scripts/check_diff.py (vendored, new file): 47 lines

@@ -0,0 +1,47 @@

import json
import sys

ALL_DIRS = {
    "libs/core",
    "libs/langchain",
    "libs/experimental",
    "libs/community",
    "libs/partners/openai",
}

if __name__ == "__main__":
    files = sys.argv[1:]
    dirs_to_run = set()

    for file in files:
        if any(
            file.startswith(dir_)
            for dir_ in (
                ".github/workflows",
                ".github/tools",
                ".github/actions",
                "libs/core",
                ".github/scripts/check_diff.py",
            )
        ):
            dirs_to_run = ALL_DIRS
            break
        elif "libs/community" in file:
            dirs_to_run.update(
                ("libs/community", "libs/langchain", "libs/experimental")
            )
        elif "libs/partners" in file:
            partner_dir = file.split("/")[2]
            dirs_to_run.update(
                (f"libs/partners/{partner_dir}", "libs/langchain", "libs/experimental")
            )
        elif "libs/langchain" in file:
            dirs_to_run.update(("libs/langchain", "libs/experimental"))
        elif "libs/experimental" in file:
            dirs_to_run.add("libs/experimental")
        elif file.startswith("libs/"):
            dirs_to_run = ALL_DIRS
            break
        else:
            pass
    print(json.dumps(list(dirs_to_run)))
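The script fans changed paths out to the set of library directories whose CI must run: core or workflow changes trigger everything, while a leaf change triggers only the affected package and its dependents, and the result is printed as a JSON array. A minimal sketch of invoking it the way the workflow below does, run from the repository root; the sample file path is hypothetical:

```python
import json
import subprocess

# Pass changed files as arguments, the same way the check_diffs workflow
# feeds in the get-changed-files output. The sample path is hypothetical.
out = subprocess.check_output(
    ["python", ".github/scripts/check_diff.py", "libs/community/foo.py"],
    text=True,
)
# A libs/community change fans out to its dependents. Set iteration order
# is unspecified, so sort before comparing.
assert sorted(json.loads(out)) == [
    "libs/community", "libs/experimental", "libs/langchain"
]
```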
@@ -1,21 +1,24 @@

---
name: libs/langchain CI
name: langchain CI

on:
  push:
    branches: [master]
  pull_request:
    paths:
      - ".github/actions/poetry_setup/action.yml"
      - ".github/tools/**"
      - ".github/workflows/_lint.yml"
      - ".github/workflows/_test.yml"
      - ".github/workflows/_pydantic_compatibility.yml"
      - ".github/workflows/langchain_ci.yml"
      - "libs/*"
      - "libs/langchain/**"
      - "libs/core/**"
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI
  workflow_call:
    inputs:
      working-directory:
        required: true
        type: string
        description: "From which folder this pipeline executes"
  workflow_dispatch:
    inputs:
      working-directory:
        required: true
        type: choice
        default: 'libs/langchain'
        options:
          - libs/langchain
          - libs/core
          - libs/experimental

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.

@@ -24,43 +27,39 @@ on:

# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.working-directory }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: "libs/langchain"

jobs:
  lint:
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: libs/langchain
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  test:
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: libs/langchain
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  compile-integration-tests:
    uses: ./.github/workflows/_compile_integration_test.yml
    with:
      working-directory: libs/langchain
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  pydantic-compatibility:
    uses: ./.github/workflows/_pydantic_compatibility.yml
    with:
      working-directory: libs/langchain
      working-directory: ${{ inputs.working-directory }}
    secrets: inherit

  extended-tests:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ${{ env.WORKDIR }}
    strategy:
      matrix:
        python-version:

@@ -69,6 +68,9 @@ jobs:

          - "3.10"
          - "3.11"
    name: Python ${{ matrix.python-version }} extended tests
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    steps:
      - uses: actions/checkout@v4

@@ -77,19 +79,14 @@

        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: libs/langchain
          working-directory: ${{ inputs.working-directory }}
          cache-key: extended

      - name: Install dependencies
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with poetry..."
          poetry install -E extended_testing

      - name: Install langchain core editable
        shell: bash
        run: |
          poetry run pip install -e ../core
          poetry install -E extended_testing --with test

      - name: Run extended tests
        run: make extended_tests

@@ -38,7 +38,7 @@ jobs:

      - name: Install integration dependencies
        shell: bash
        run: poetry install --with=test_integration
        run: poetry install --with=test_integration,test

      - name: Check integration tests compile
        shell: bash
.github/workflows/_lint.yml (vendored): 32 changes

@@ -85,9 +85,37 @@ jobs:

        with:
          path: |
            ${{ env.WORKDIR }}/.mypy_cache
          key: mypy-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
          key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}

      - name: Analysing the code with our lint
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint
          make lint_package

      - name: Install test dependencies
        # Also installs dev/lint/test/typing dependencies, to ensure we have
        # type hints for as many of our libraries as possible.
        # This helps catch errors that require dependencies to be spotted, for example:
        # https://github.com/langchain-ai/langchain/pull/10249/files#diff-935185cd488d015f026dcd9e19616ff62863e8cde8c0bee70318d3ccbca98341
        #
        # If you change this configuration, make sure to change the `cache-key`
        # in the `poetry_setup` action above to stop using the old cache.
        # It doesn't matter how you change it, any change will cause a cache-bust.
        working-directory: ${{ inputs.working-directory }}
        run: |
          poetry install --with test

      - name: Get .mypy_cache_test to speed up mypy
        uses: actions/cache@v3
        env:
          SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
        with:
          path: |
            ${{ env.WORKDIR }}/.mypy_cache_test
          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}

      - name: Analysing the code with our lint
        working-directory: ${{ inputs.working-directory }}
        run: |
          make lint_tests
@@ -42,7 +42,7 @@ jobs:

      - name: Install dependencies
        shell: bash
        run: poetry install
        run: poetry install --with test

      - name: Install langchain editable
        working-directory: ${{ inputs.working-directory }}
.github/workflows/check_diffs.yml (vendored, new file): 47 lines

@@ -0,0 +1,47 @@

---
name: Check library diffs

on:
  push:
    branches: [master]
  pull_request:
    paths:
      - ".github/actions/**"
      - ".github/tools/**"
      - ".github/workflows/**"
      - "libs/**"

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - id: files
        uses: Ana06/get-changed-files@v2.2.0
      - id: set-matrix
        run: echo "dirs-to-run=$(python .github/scripts/check_diff.py ${{ steps.files.outputs.all }})" >> $GITHUB_OUTPUT
    outputs:
      dirs-to-run: ${{ steps.set-matrix.outputs.dirs-to-run }}
  ci:
    needs: [ build ]
    strategy:
      matrix:
        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-run) }}
    uses: ./.github/workflows/_all_ci.yml
    with:
      working-directory: ${{ matrix.working-directory }}
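The two-job split is the point of this workflow: `build` computes a JSON array of directories and exposes it as a job output, and `ci` turns that array into a matrix via `fromJson`, invoking the reusable `_all_ci.yml` once per directory. A small sketch of the contract between the two jobs; the sample output value is hypothetical:

```python
import json

# Hypothetical line the set-matrix step appends to $GITHUB_OUTPUT:
output_line = 'dirs-to-run=["libs/core", "libs/langchain"]'

key, _, value = output_line.partition("=")
assert key == "dirs-to-run"

# fromJson() in the ci job sees the decoded array and fans out the matrix:
for working_directory in json.loads(value):
    print(f"_all_ci.yml invoked with working-directory={working_directory}")
```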
.github/workflows/langchain_cli_ci.yml (vendored): 47 changes

@@ -1,47 +0,0 @@

---
name: libs/cli CI

on:
  push:
    branches: [ master ]
  pull_request:
    paths:
      - '.github/actions/poetry_setup/action.yml'
      - '.github/tools/**'
      - '.github/workflows/_lint.yml'
      - '.github/workflows/_test.yml'
      - '.github/workflows/_pydantic_compatibility.yml'
      - '.github/workflows/langchain_cli_ci.yml'
      - 'libs/cli/**'
      - 'libs/*'
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: "libs/cli"

jobs:
  lint:
    uses:
      ./.github/workflows/_lint.yml
    with:
      working-directory: libs/cli
      langchain-location: ../langchain
    secrets: inherit

  test:
    uses:
      ./.github/workflows/_test.yml
    with:
      working-directory: libs/cli
    secrets: inherit
.github/workflows/langchain_community_release.yml (vendored, new file): 13 lines

@@ -0,0 +1,13 @@

---
name: libs/community Release

on:
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_release.yml
    with:
      working-directory: libs/community
    secrets: inherit
.github/workflows/langchain_core_ci.yml (vendored): 52 changes

@@ -1,52 +0,0 @@

---
name: libs/langchain core CI

on:
  push:
    branches: [ master ]
  pull_request:
    paths:
      - '.github/actions/poetry_setup/action.yml'
      - '.github/tools/**'
      - '.github/workflows/_lint.yml'
      - '.github/workflows/_test.yml'
      - '.github/workflows/_pydantic_compatibility.yml'
      - '.github/workflows/langchain_core_ci.yml'
      - 'libs/core/**'
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: "libs/core"

jobs:
  lint:
    uses:
      ./.github/workflows/_lint.yml
    with:
      working-directory: libs/core
    secrets: inherit

  test:
    uses:
      ./.github/workflows/_test.yml
    with:
      working-directory: libs/core
    secrets: inherit

  pydantic-compatibility:
    uses:
      ./.github/workflows/_pydantic_compatibility.yml
    with:
      working-directory: libs/core
    secrets: inherit
.github/workflows/langchain_experimental_ci.yml (vendored): 136 changes

@@ -1,136 +0,0 @@

---
name: libs/experimental CI

on:
  push:
    branches: [master]
  pull_request:
    paths:
      - ".github/actions/poetry_setup/action.yml"
      - ".github/tools/**"
      - ".github/workflows/_lint.yml"
      - ".github/workflows/_test.yml"
      - ".github/workflows/langchain_experimental_ci.yml"
      - "libs/*"
      - "libs/experimental/**"
      - "libs/langchain/**"
      - "libs/core/**"
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

# If another push to the same PR or branch happens while this workflow is still running,
# cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to cancel
# pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  POETRY_VERSION: "1.6.1"
  WORKDIR: "libs/experimental"

jobs:
  lint:
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: libs/experimental
    secrets: inherit

  test:
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: libs/experimental
    secrets: inherit

  compile-integration-tests:
    uses: ./.github/workflows/_compile_integration_test.yml
    with:
      working-directory: libs/experimental
    secrets: inherit

  # It's possible that langchain-experimental works fine with the latest *published* langchain,
  # but is broken with the langchain on `master`.
  #
  # We want to catch situations like that *before* releasing a new langchain, hence this test.
  test-with-latest-langchain:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ${{ env.WORKDIR }}
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: test with unpublished langchain - Python ${{ matrix.python-version }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ env.WORKDIR }}
          cache-key: unpublished-langchain

      - name: Install dependencies
        shell: bash
        run: |
          echo "Running tests with unpublished langchain, installing dependencies with poetry..."
          poetry install

          echo "Editably installing langchain outside of poetry, to avoid messing up lockfile..."
          poetry run pip install -e ../langchain
          poetry run pip install -e ../core

      - name: Run tests
        run: make test

  extended-tests:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ${{ env.WORKDIR }}
    strategy:
      matrix:
        python-version:
          - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
    name: Python ${{ matrix.python-version }} extended tests
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ matrix.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: libs/experimental
          cache-key: extended

      - name: Install dependencies
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with poetry..."
          poetry install -E extended_testing

      - name: Run extended tests
        run: make extended_tests

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
.github/workflows/langchain_openai_release.yml (vendored, new file): 13 lines

@@ -0,0 +1,13 @@

---
name: libs/core Release

on:
  workflow_dispatch: # Allows to trigger the workflow manually in GitHub UI

jobs:
  release:
    uses:
      ./.github/workflows/_release.yml
    with:
      working-directory: libs/core
    secrets: inherit
.github/workflows/templates_ci.yml (vendored): 1 change

@@ -33,5 +33,4 @@ jobs:

      ./.github/workflows/_lint.yml
    with:
      working-directory: templates
      langchain-location: ../libs/langchain
    secrets: inherit
.gitignore (vendored): 3 changes

@@ -167,8 +167,7 @@ docs/node_modules/

docs/.docusaurus/
docs/.cache-loader/
docs/_dist
docs/api_reference/api_reference.rst
docs/api_reference/experimental_api_reference.rst
docs/api_reference/*api_reference.rst
docs/api_reference/_build
docs/api_reference/*/
!docs/api_reference/_static/
@@ -0,0 +1,9 @@

"""Main entrypoint into package."""
from importlib import metadata

try:
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Case where package metadata is not available.
    __version__ = ""
del metadata  # optional, avoids polluting the results of dir(__package__)
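The try/except matters because `importlib.metadata.version` raises rather than returning a sentinel when the distribution is not installed, for example when the package runs straight from a source tree. A quick stdlib-only illustration; the package name below is deliberately bogus:

```python
from importlib import metadata

try:
    # Raises metadata.PackageNotFoundError: no such distribution installed.
    print(metadata.version("package-that-does-not-exist"))
except metadata.PackageNotFoundError:
    print("metadata unavailable, fall back to __version__ = ''")
```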
@@ -18,45 +18,50 @@ from typing import Any

from langchain_core._api.path import as_import_path

from langchain_integrations.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
#from langchain_integrations.agent_toolkits.amadeus.toolkit import AmadeusToolkit
from langchain_integrations.agent_toolkits.azure_cognitive_services import (
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
from langchain_community.agent_toolkits.azure_cognitive_services import (
    AzureCognitiveServicesToolkit,
)
#from langchain_integrations.agent_toolkits.conversational_retrieval.openai_functions import (
# create_conversational_retrieval_agent,
#)
from langchain_integrations.agent_toolkits.file_management.toolkit import (
from langchain_community.agent_toolkits.conversational_retrieval.openai_functions import (  # noqa: E501
    create_conversational_retrieval_agent,
)
from langchain_community.agent_toolkits.file_management.toolkit import (
    FileManagementToolkit,
)
from langchain_integrations.agent_toolkits.gmail.toolkit import GmailToolkit
from langchain_integrations.agent_toolkits.jira.toolkit import JiraToolkit
#from langchain_integrations.agent_toolkits.json.base import create_json_agent
#from langchain_integrations.agent_toolkits.json.toolkit import JsonToolkit
from langchain_integrations.agent_toolkits.multion.toolkit import MultionToolkit
#from langchain_integrations.agent_toolkits.nla.toolkit import NLAToolkit
from langchain_integrations.agent_toolkits.office365.toolkit import O365Toolkit
#from langchain_integrations.agent_toolkits.openapi.base import create_openapi_agent
#from langchain_integrations.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_integrations.agent_toolkits.playwright.toolkit import PlayWrightBrowserToolkit
#from langchain_integrations.agent_toolkits.powerbi.base import create_pbi_agent
#from langchain_integrations.agent_toolkits.powerbi.chat_base import create_pbi_chat_agent
#from langchain_integrations.agent_toolkits.powerbi.toolkit import PowerBIToolkit
#from langchain_integrations.agent_toolkits.spark_sql.base import create_spark_sql_agent
#from langchain_integrations.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_integrations.agent_toolkits.sql.base import create_sql_agent
from langchain_integrations.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_integrations.agent_toolkits.vectorstore.base import (
from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.agent_toolkits.json.base import create_json_agent
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit
from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit
from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit
from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_community.agent_toolkits.playwright.toolkit import (
    PlayWrightBrowserToolkit,
)
from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
from langchain_community.agent_toolkits.powerbi.chat_base import create_pbi_chat_agent
from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_community.agent_toolkits.slack.toolkit import SlackToolkit
from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_community.agent_toolkits.sql.base import create_sql_agent
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit
from langchain_community.agent_toolkits.vectorstore.base import (
    create_vectorstore_agent,
    create_vectorstore_router_agent,
)
from langchain_integrations.agent_toolkits.vectorstore.toolkit import (
from langchain_community.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreRouterToolkit,
    VectorStoreToolkit,
)
from langchain_integrations.agent_toolkits.zapier.toolkit import ZapierToolkit
from langchain_integrations.tools.retriever import create_retriever_tool
from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit
from langchain_community.tools.retriever import create_retriever_tool

DEPRECATED_AGENTS = [
    "create_csv_agent",

@@ -91,11 +96,14 @@ __all__ = [

    "JiraToolkit",
    "JsonToolkit",
    "MultionToolkit",
    "NasaToolkit",
    "NLAToolkit",
    "O365Toolkit",
    "OpenAPIToolkit",
    "PlayWrightBrowserToolkit",
    "PowerBIToolkit",
    "SlackToolkit",
    "SteamToolkit",
    "SQLDatabaseToolkit",
    "SparkSQLToolkit",
    "VectorStoreInfo",
@@ -1,19 +1,16 @@

from typing import Any, List, Optional
from __future__ import annotations

from typing import Any, List, Optional, TYPE_CHECKING

from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder

from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
from langchain_integrations.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_openai.chat_model import ChatOpenAI
from langchain_integrations.memory.token_buffer import ConversationTokenBufferMemory
from langchain_core.tools import BaseTool

if TYPE_CHECKING:
    from langchain.agents.agent import AgentExecutor


def _get_default_system_message() -> SystemMessage:
    return SystemMessage(

@@ -24,7 +21,6 @@ def _get_default_system_message() -> SystemMessage:

        )
    )


def create_conversational_retrieval_agent(
    llm: BaseLanguageModel,
    tools: List[BaseTool],

@@ -56,9 +52,13 @@ def create_conversational_retrieval_agent(

    Returns:
        An agent executor initialized appropriately
    """
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
        AgentTokenBufferMemory,
    )
    from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
    from langchain.memory.token_buffer import ConversationTokenBufferMemory

    if not isinstance(llm, ChatOpenAI):
        raise ValueError("Only supported with ChatOpenAI models.")
    if remember_intermediate_steps:
        memory: BaseMemory = AgentTokenBufferMemory(
            memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
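This hunk shows the refactoring pattern repeated through the rest of the diff: imports from `langchain` move under `if TYPE_CHECKING:` (with `from __future__ import annotations` so the names remain legal in signatures) and are re-imported inside the function body at call time, which breaks the runtime import cycle between the packages. A minimal sketch of the pattern, assuming `langchain` is installed; the wrapper function itself is illustrative:

```python
from __future__ import annotations

from typing import Any, TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by type checkers, so importing langchain here cannot
    # re-create a runtime import cycle with langchain_community.
    from langchain.agents.agent import AgentExecutor


def check_executor(candidate: Any) -> AgentExecutor:
    # Deferred runtime import: resolved on first call, after both
    # packages have finished initializing.
    from langchain.agents.agent import AgentExecutor

    if not isinstance(candidate, AgentExecutor):
        raise TypeError("expected an AgentExecutor")
    return candidate
```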
@@ -1,15 +1,15 @@

"""Json agent."""
from typing import Any, Dict, List, Optional
from __future__ import annotations
from typing import Any, Dict, List, Optional, TYPE_CHECKING

from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel

from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain_integrations.agent_toolkits.json.toolkit import JsonToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit

if TYPE_CHECKING:
    from langchain.agents.agent import AgentExecutor


def create_json_agent(

@@ -18,20 +18,24 @@

    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = JSON_PREFIX,
    suffix: str = JSON_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    format_instructions: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Construct a json agent from an LLM and tools."""
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain

    tools = toolkit.get_tools()
    prompt_params = {"format_instructions": format_instructions} if format_instructions is not None else {}
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
        **prompt_params,
    )
    llm_chain = LLMChain(
        llm=llm,
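Changing `format_instructions` to default to `None` and forwarding it through `prompt_params` only when set means the downstream `ZeroShotAgent.create_prompt` default applies unless the caller overrides it, removing the module-level dependency on langchain's `FORMAT_INSTRUCTIONS` constant. The forwarding idiom in isolation; the function names here are stand-ins:

```python
from typing import Optional


def create_prompt(*, format_instructions: str = "downstream default") -> str:
    # Stand-in for ZeroShotAgent.create_prompt and its own default.
    return format_instructions


def create_agent(format_instructions: Optional[str] = None) -> str:
    # Forward the kwarg only when the caller supplied one, so the
    # callee's default wins otherwise.
    prompt_params = (
        {"format_instructions": format_instructions}
        if format_instructions is not None
        else {}
    )
    return create_prompt(**prompt_params)


assert create_agent() == "downstream default"
assert create_agent(format_instructions="custom") == "custom"
```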
@@ -1,15 +1,17 @@

"""Tool for interacting with a single API with natural language definition."""

from typing import Any, Optional
from __future__ import annotations
from typing import Any, Optional, TYPE_CHECKING

from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool

from langchain_integrations.agents.tools import Tool
from langchain_integrations.chains.api.openapi.chain import OpenAPIEndpointChain
from langchain_integrations.tools.openapi.utils.api_models import APIOperation
from langchain_integrations.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_integrations.utilities.requests import Requests
from langchain_community.tools.openapi.utils.api_models import APIOperation
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_community.utilities.requests import Requests

if TYPE_CHECKING:
    from langchain.chains.api.openapi.chain import OpenAPIEndpointChain


class NLATool(Tool):
@@ -1,18 +1,18 @@

"""OpenAPI spec agent."""
from typing import Any, Dict, List, Optional
from __future__ import annotations
from typing import Any, Dict, List, Optional, TYPE_CHECKING

from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel

from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.openapi.prompt import (
from langchain_community.agent_toolkits.openapi.prompt import (
    OPENAPI_PREFIX,
    OPENAPI_SUFFIX,
)
from langchain_integrations.agent_toolkits.openapi.toolkit import OpenAPIToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit

if TYPE_CHECKING:
    from langchain.agents.agent import AgentExecutor


def create_openapi_agent(

@@ -21,7 +21,7 @@ def create_openapi_agent(

    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = OPENAPI_PREFIX,
    suffix: str = OPENAPI_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    format_instructions: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,

@@ -45,13 +45,17 @@ def create_openapi_agent(

    See https://python.langchain.com/docs/security for more information.
    """
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain

    tools = toolkit.get_tools()
    prompt_params = {"format_instructions": format_instructions} if format_instructions is not None else {}
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
        **prompt_params
    )
    llm_chain = LLMChain(
        llm=llm,
@@ -2,15 +2,17 @@

import json
import re
from functools import partial
from typing import Any, Callable, Dict, List, Optional
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING

import yaml
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool, Tool
from langchain_openai.llms import OpenAI

from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.openapi.planner_prompt import (
from langchain_community.agent_toolkits.openapi.planner_prompt import (
    API_CONTROLLER_PROMPT,
    API_CONTROLLER_TOOL_DESCRIPTION,
    API_CONTROLLER_TOOL_NAME,

@@ -29,16 +31,15 @@ from langchain_integrations.agent_toolkits.openapi.planner_prompt import (

    REQUESTS_POST_TOOL_DESCRIPTION,
    REQUESTS_PUT_TOOL_DESCRIPTION,
)
from langchain_integrations.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.tools import Tool
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_openai.llm import OpenAI
from langchain_integrations.memory import ReadOnlySharedMemory
from langchain_core.tools import BaseTool
from langchain_integrations.tools.requests.tool import BaseRequestsTool
from langchain_integrations.utilities.requests import RequestsWrapper
from langchain_community.agent_toolkits.openapi.spec import ReducedOpenAPISpec
from langchain_community.output_parsers.json import parse_json_markdown
from langchain_community.tools.requests.tool import BaseRequestsTool
from langchain_community.utilities.requests import RequestsWrapper

if TYPE_CHECKING:
    from langchain.agents.agent import AgentExecutor
    from langchain.chains.llm import LLMChain
    from langchain.memory import ReadOnlySharedMemory

#
# Requests tools with LLM-instructed extraction of truncated responses.

@@ -51,6 +52,7 @@ MAX_RESPONSE_LENGTH = 5000

def _get_default_llm_chain(prompt: BasePromptTemplate) -> LLMChain:
    from langchain.chains.llm import LLMChain

    return LLMChain(
        llm=OpenAI(),
        prompt=prompt,

@@ -73,14 +75,14 @@ class RequestsGetToolWithParsing(BaseRequestsTool, BaseTool):

    """Tool description."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """Maximum length of the response to be returned."""
    llm_chain: LLMChain = Field(
    llm_chain: Any = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_GET_PROMPT)
    )
    """LLMChain used to extract the response."""

    def _run(self, text: str) -> str:
        try:
            data = json.loads(text)
            data = parse_json_markdown(text)
        except json.JSONDecodeError as e:
            raise e
        data_params = data.get("params")

@@ -103,14 +105,14 @@ class RequestsPostToolWithParsing(BaseRequestsTool, BaseTool):

    """Tool description."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """Maximum length of the response to be returned."""
    llm_chain: LLMChain = Field(
    llm_chain: Any = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_POST_PROMPT)
    )
    """LLMChain used to extract the response."""

    def _run(self, text: str) -> str:
        try:
            data = json.loads(text)
            data = parse_json_markdown(text)
        except json.JSONDecodeError as e:
            raise e
        response = self.requests_wrapper.post(data["url"], data["data"])

@@ -132,14 +134,14 @@ class RequestsPatchToolWithParsing(BaseRequestsTool, BaseTool):

    """Tool description."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """Maximum length of the response to be returned."""
    llm_chain: LLMChain = Field(
    llm_chain: Any = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_PATCH_PROMPT)
    )
    """LLMChain used to extract the response."""

    def _run(self, text: str) -> str:
        try:
            data = json.loads(text)
            data = parse_json_markdown(text)
        except json.JSONDecodeError as e:
            raise e
        response = self.requests_wrapper.patch(data["url"], data["data"])

@@ -161,14 +163,14 @@ class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool):

    """Tool description."""
    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """Maximum length of the response to be returned."""
    llm_chain: LLMChain = Field(
    llm_chain: Any = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_PUT_PROMPT)
    )
    """LLMChain used to extract the response."""

    def _run(self, text: str) -> str:
        try:
            data = json.loads(text)
            data = parse_json_markdown(text)
        except json.JSONDecodeError as e:
            raise e
        response = self.requests_wrapper.put(data["url"], data["data"])

@@ -191,14 +193,14 @@ class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):

    response_length: Optional[int] = MAX_RESPONSE_LENGTH
    """The maximum length of the response."""
    llm_chain: LLMChain = Field(
    llm_chain: Any = Field(
        default_factory=_get_default_llm_chain_factory(PARSING_DELETE_PROMPT)
    )
    """The LLM chain used to parse the response."""

    def _run(self, text: str) -> str:
        try:
            data = json.loads(text)
            data = parse_json_markdown(text)
        except json.JSONDecodeError as e:
            raise e
        response = self.requests_wrapper.delete(data["url"])

@@ -217,6 +219,7 @@ class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):

def _create_api_planner_tool(
    api_spec: ReducedOpenAPISpec, llm: BaseLanguageModel
) -> Tool:
    from langchain.chains.llm import LLMChain

    endpoint_descriptions = [
        f"{name} {description}" for name, description, _ in api_spec.endpoints
    ]

@@ -240,6 +243,9 @@ def _create_api_controller_agent(

    requests_wrapper: RequestsWrapper,
    llm: BaseLanguageModel,
) -> AgentExecutor:
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.agents.agent import AgentExecutor
    from langchain.chains.llm import LLMChain

    get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
    post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
    tools: List[BaseTool] = [

@@ -329,6 +335,9 @@ def create_openapi_agent(

    rather than a top-level planner
    that invokes a controller with its plan. This is to keep the planner simple.
    """
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.agents.agent import AgentExecutor
    from langchain.chains.llm import LLMChain

    tools = [
        _create_api_planner_tool(api_spec, llm),
        _create_api_controller_tool(api_spec, requests_wrapper, llm),
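Swapping `json.loads` for `parse_json_markdown` in every `_run` makes the requests tools tolerant of LLM output wrapped in a fenced code block. A hedged sketch of what such a parser does; the real `langchain_community.output_parsers.json.parse_json_markdown` may differ in details:

```python
import json
import re
from typing import Any


def parse_json_markdown_sketch(text: str) -> Any:
    """Accept bare JSON or JSON inside a ```json ... ``` fence."""
    match = re.search(r"```(?:json)?\s*(.*?)```", text, re.DOTALL)
    payload = match.group(1) if match else text
    return json.loads(payload.strip())


print(parse_json_markdown_sketch('{"url": "https://example.com"}'))
print(parse_json_markdown_sketch('```json\n{"url": "https://example.com"}\n```'))
```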
@@ -4,23 +4,22 @@ from __future__ import annotations

from typing import Any, List

from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool

from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_integrations.agent_toolkits.json.base import create_json_agent
from langchain_integrations.agent_toolkits.json.toolkit import JsonToolkit
from langchain_integrations.agent_toolkits.openapi.prompt import DESCRIPTION
from langchain_integrations.agents.tools import Tool
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.json.tool import JsonSpec
from langchain_integrations.tools.requests.tool import (
from langchain_community.agent_toolkits.base import BaseToolkit
from langchain_community.agent_toolkits.json.base import create_json_agent
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.openapi.prompt import DESCRIPTION
from langchain_community.tools import BaseTool
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.tools.requests.tool import (
    RequestsDeleteTool,
    RequestsGetTool,
    RequestsPatchTool,
    RequestsPostTool,
    RequestsPutTool,
)
from langchain_integrations.utilities.requests import TextRequestsWrapper
from langchain_community.utilities.requests import TextRequestsWrapper


class RequestsToolkit(BaseToolkit):

@@ -65,7 +64,7 @@ class OpenAPIToolkit(BaseToolkit):

    an OpenAPI compliant API.
    """

    json_agent: AgentExecutor
    json_agent: Any
    requests_wrapper: TextRequestsWrapper

    def get_tools(self) -> List[BaseTool]:
@@ -1,19 +1,20 @@

"""Power BI agent."""
from typing import Any, Dict, List, Optional
from __future__ import annotations

from typing import Any, Dict, List, Optional, TYPE_CHECKING

from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel

from langchain_integrations.agents import AgentExecutor
from langchain_integrations.agent_toolkits.powerbi.prompt import (
from langchain_community.agent_toolkits.powerbi.prompt import (
    POWERBI_PREFIX,
    POWERBI_SUFFIX,
)
from langchain_integrations.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_integrations.utilities.powerbi import PowerBIDataset
from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_community.utilities.powerbi import PowerBIDataset

if TYPE_CHECKING:
    from langchain.agents import AgentExecutor


def create_pbi_agent(

@@ -23,7 +24,7 @@ def create_pbi_agent(

    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = POWERBI_PREFIX,
    suffix: str = POWERBI_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    format_instructions: Optional[str] = None,
    examples: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    top_k: int = 10,

@@ -32,12 +33,16 @@ def create_pbi_agent(

    **kwargs: Any,
) -> AgentExecutor:
    """Construct a Power BI agent from an LLM and tools."""
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.agents import AgentExecutor
    from langchain.chains.llm import LLMChain

    if toolkit is None:
        if powerbi is None:
            raise ValueError("Must provide either a toolkit or powerbi dataset")
        toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    tools = toolkit.get_tools()
    tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names
    prompt_params = {"format_instructions": format_instructions} if format_instructions is not None else {}
    agent = ZeroShotAgent(
        llm_chain=LLMChain(
            llm=llm,

@@ -45,8 +50,8 @@ def create_pbi_agent(

                tools,
                prefix=prefix.format(top_k=top_k).format(tables=tables),
                suffix=suffix,
                format_instructions=format_instructions,
                input_variables=input_variables,
                **prompt_params,
            ),
            callback_manager=callback_manager,  # type: ignore
            verbose=verbose,
@@ -1,19 +1,21 @@

"""Power BI agent."""
from typing import Any, Dict, List, Optional
from __future__ import annotations
from typing import Any, Dict, List, Optional, TYPE_CHECKING

from langchain_integrations.agents import AgentExecutor
from langchain_integrations.agents.agent import AgentOutputParser
from langchain_integrations.agent_toolkits.powerbi.prompt import (
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models.chat_models import BaseChatModel

from langchain_community.agent_toolkits.powerbi.prompt import (
    POWERBI_CHAT_PREFIX,
    POWERBI_CHAT_SUFFIX,
)
from langchain_integrations.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_integrations.agents.conversational_chat.base import ConversationalChatAgent
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chat_models.base import BaseChatModel
from langchain_integrations.memory import ConversationBufferMemory
from langchain_integrations.memory.chat_memory import BaseChatMemory
from langchain_integrations.utilities.powerbi import PowerBIDataset
from langchain_community.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain_community.utilities.powerbi import PowerBIDataset

if TYPE_CHECKING:
    from langchain.agents import AgentExecutor
    from langchain.agents.agent import AgentOutputParser
    from langchain.memory.chat_memory import BaseChatMemory


def create_pbi_chat_agent(

@@ -36,6 +38,9 @@ def create_pbi_chat_agent(

    If you supply only a toolkit and no Power BI dataset, the same LLM is used for both.
    """
    from langchain.agents import AgentExecutor
    from langchain.agents.conversational_chat.base import ConversationalChatAgent
    from langchain.memory import ConversationBufferMemory

    if toolkit is None:
        if powerbi is None:
            raise ValueError("Must provide either a toolkit or powerbi dataset")
@@ -1,7 +1,10 @@

"""Toolkit for interacting with a Power BI dataset."""
from typing import List, Optional, Union
from __future__ import annotations
from typing import List, Optional, Union, TYPE_CHECKING

from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
    ChatPromptTemplate,

@@ -10,22 +13,22 @@ from langchain_core.prompts.chat import (

)
from langchain_core.pydantic_v1 import Field

from langchain_integrations.agent_toolkits.base import BaseToolkit
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_integrations.chat_models.base import BaseChatModel
from langchain_integrations.tools import BaseTool
from langchain_integrations.tools.powerbi.prompt import (
from langchain_community.agent_toolkits.base import BaseToolkit
from langchain_community.tools import BaseTool
from langchain_community.tools.powerbi.prompt import (
    QUESTION_TO_QUERY_BASE,
    SINGLE_QUESTION_TO_QUERY,
    USER_INPUT,
)
from langchain_integrations.tools.powerbi.tool import (
from langchain_community.tools.powerbi.tool import (
    InfoPowerBITool,
    ListPowerBITool,
    QueryPowerBITool,
)
from langchain_integrations.utilities.powerbi import PowerBIDataset
from langchain_community.utilities.powerbi import PowerBIDataset

if TYPE_CHECKING:
    from langchain.chains.llm import LLMChain


class PowerBIToolkit(BaseToolkit):

@@ -71,6 +74,7 @@ class PowerBIToolkit(BaseToolkit):

    def _get_chain(self) -> LLMChain:
        """Construct the chain based on the callback manager and model type."""
        from langchain.chains.llm import LLMChain

        if isinstance(self.llm, BaseLanguageModel):
            return LLMChain(
                llm=self.llm,
@@ -1,15 +1,15 @@
"""Spark SQL agent."""
from typing import Any, Dict, List, Optional
from __future__ import annotations
from typing import Any, Dict, List, Optional, TYPE_CHECKING

from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel

from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.spark_sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_integrations.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_core.callbacks.base import BaseCallbackManager, Callbacks
from langchain_integrations.chains.llm import LLMChain
from langchain_community.agent_toolkits.spark_sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit

if TYPE_CHECKING:
    from langchain.agents.agent import AgentExecutor


def create_spark_sql_agent(
@@ -19,7 +19,7 @@ def create_spark_sql_agent(
    callbacks: Callbacks = None,
    prefix: str = SQL_PREFIX,
    suffix: str = SQL_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    format_instructions: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    top_k: int = 10,
    max_iterations: Optional[int] = 15,
@@ -30,14 +30,18 @@ def create_spark_sql_agent(
    **kwargs: Any,
) -> AgentExecutor:
    """Construct a Spark SQL agent from an LLM and tools."""
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain
    tools = toolkit.get_tools()
    prefix = prefix.format(top_k=top_k)
    prompt_params = {"format_instructions": format_instructions} if format_instructions is not None else {}
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
        **prompt_params,
    )
    llm_chain = LLMChain(
        llm=llm,
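The format_instructions change above switches from always forwarding a module-level default to forwarding the value only when the caller supplied one, so ZeroShotAgent.create_prompt can keep its own default. A small self-contained sketch of that conditional-kwarg idiom (names are illustrative):

.. code-block:: python

    def make_kwargs(format_instructions=None, **fixed):
        # Forward the override only when actually provided; otherwise let
        # the callee fall back to its own default.
        params = dict(fixed)
        if format_instructions is not None:
            params["format_instructions"] = format_instructions
        return params


    assert make_kwargs(prefix="p") == {"prefix": "p"}
    assert make_kwargs(prefix="p", format_instructions="F") == {
        "prefix": "p",
        "format_instructions": "F",
    }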
@@ -1,6 +1,8 @@
"""SQL agent."""
from typing import Any, Dict, List, Optional, Sequence
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, TYPE_CHECKING

from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import AIMessage, SystemMessage
from langchain_core.prompts.chat import (
@@ -9,30 +11,27 @@ from langchain_core.prompts.chat import (
    MessagesPlaceholder,
)

from langchain_integrations.agents.agent import AgentExecutor, BaseSingleActionAgent
from langchain_integrations.agent_toolkits.sql.prompt import (
from langchain_community.agent_toolkits.sql.prompt import (
    SQL_FUNCTIONS_SUFFIX,
    SQL_PREFIX,
    SQL_SUFFIX,
)
from langchain_integrations.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_integrations.agents.agent_types import AgentType
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_integrations.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain_integrations.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain
from langchain_integrations.tools import BaseTool
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_community.tools import BaseTool

if TYPE_CHECKING:
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.agent_types import AgentType


def create_sql_agent(
    llm: BaseLanguageModel,
    toolkit: SQLDatabaseToolkit,
    agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    agent_type: Optional[AgentType] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = SQL_PREFIX,
    suffix: Optional[str] = None,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    format_instructions: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    top_k: int = 10,
    max_iterations: Optional[int] = 15,
@@ -44,17 +43,24 @@ def create_sql_agent(
    **kwargs: Any,
) -> AgentExecutor:
    """Construct an SQL agent from an LLM and tools."""
    from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent
    from langchain.agents.agent_types import AgentType
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
    from langchain.chains.llm import LLMChain
    agent_type = agent_type or AgentType.ZERO_SHOT_REACT_DESCRIPTION
    tools = toolkit.get_tools() + list(extra_tools)
    prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k)
    agent: BaseSingleActionAgent

    if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
        prompt_params = {"format_instructions": format_instructions} if format_instructions is not None else {}
        prompt = ZeroShotAgent.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix or SQL_SUFFIX,
            format_instructions=format_instructions,
            input_variables=input_variables,
            **prompt_params,
        )
        llm_chain = LLMChain(
            llm=llm,
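A usage sketch for the refactored constructor, assuming the langchain_community module paths introduced by this diff, a local SQLite file, and an OpenAI chat model (any BaseLanguageModel works):

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain_community.agent_toolkits.sql.base import create_sql_agent  # assumed path
    from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
    from langchain_community.utilities.sql_database import SQLDatabase

    db = SQLDatabase.from_uri("sqlite:///example.db")
    llm = ChatOpenAI(temperature=0)
    toolkit = SQLDatabaseToolkit(db=db, llm=llm)

    # agent_type now defaults to None and is resolved to
    # ZERO_SHOT_REACT_DESCRIPTION inside create_sql_agent.
    agent = create_sql_agent(llm=llm, toolkit=toolkit, top_k=5)
    agent.run("How many rows does the users table have?")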
@@ -1,17 +1,18 @@
"""VectorStore agent."""
from typing import Any, Dict, Optional
from __future__ import annotations
from typing import Any, Dict, Optional, TYPE_CHECKING

from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel

from langchain_integrations.agents.agent import AgentExecutor
from langchain_integrations.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX
from langchain_integrations.agent_toolkits.vectorstore.toolkit import (
from langchain_community.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX
from langchain_community.agent_toolkits.vectorstore.toolkit import (
    VectorStoreRouterToolkit,
    VectorStoreToolkit,
)
from langchain_integrations.agents.mrkl.base import ZeroShotAgent
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_integrations.chains.llm import LLMChain

if TYPE_CHECKING:
    from langchain.agents.agent import AgentExecutor


def create_vectorstore_agent(
@@ -37,6 +38,9 @@ def create_vectorstore_agent(
    Returns:
        AgentExecutor: A callable AgentExecutor object. Either call it directly or use the run method with the query to get the response.
    """ # noqa: E501
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain
    tools = toolkit.get_tools()
    prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
    llm_chain = LLMChain(
@@ -78,6 +82,9 @@ def create_vectorstore_router_agent(
    Returns:
        AgentExecutor: A callable AgentExecutor object. Either call it directly or use the run method with the query to get the response.
    """ # noqa: E501
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain
    tools = toolkit.get_tools()
    prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
    llm_chain = LLMChain(
@@ -0,0 +1,83 @@
"""**Callback handlers** allow listening to events in LangChain.

**Class hierarchy:**

.. code-block::

    BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""

from langchain_core.callbacks import (
    StdOutCallbackHandler,
    StreamingStdOutCallbackHandler,
)
from langchain_core.tracers.langchain import LangChainTracer

from langchain_community.callbacks.aim_callback import AimCallbackHandler
from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler
from langchain_community.callbacks.arize_callback import ArizeCallbackHandler
from langchain_community.callbacks.arthur_callback import ArthurCallbackHandler
from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
from langchain_community.callbacks.context_callback import ContextCallbackHandler
from langchain_community.callbacks.file import FileCallbackHandler
from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
from langchain_community.callbacks.human import HumanApprovalCallbackHandler
from langchain_community.callbacks.infino_callback import InfinoCallbackHandler
from langchain_community.callbacks.labelstudio_callback import (
    LabelStudioCallbackHandler,
)
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
from langchain_community.callbacks.manager import (
    get_openai_callback,
    wandb_tracing_enabled,
)
from langchain_community.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.promptlayer_callback import (
    PromptLayerCallbackHandler,
)
from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHandler
from langchain_community.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain_community.callbacks.streaming_stdout_final_only import (
    FinalStreamingStdOutCallbackHandler,
)
from langchain_community.callbacks.streamlit import (
    LLMThoughtLabeler,
    StreamlitCallbackHandler,
)
from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler
from langchain_community.callbacks.wandb_callback import WandbCallbackHandler
from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler

__all__ = [
    "AimCallbackHandler",
    "ArgillaCallbackHandler",
    "ArizeCallbackHandler",
    "PromptLayerCallbackHandler",
    "ArthurCallbackHandler",
    "ClearMLCallbackHandler",
    "CometCallbackHandler",
    "ContextCallbackHandler",
    "FileCallbackHandler",
    "HumanApprovalCallbackHandler",
    "InfinoCallbackHandler",
    "MlflowCallbackHandler",
    "LLMonitorCallbackHandler",
    "OpenAICallbackHandler",
    "StdOutCallbackHandler",
    "AsyncIteratorCallbackHandler",
    "StreamingStdOutCallbackHandler",
    "FinalStreamingStdOutCallbackHandler",
    "LLMThoughtLabeler",
    "LangChainTracer",
    "StreamlitCallbackHandler",
    "WandbCallbackHandler",
    "WhyLabsCallbackHandler",
    "get_openai_callback",
    "wandb_tracing_enabled",
    "FlyteCallbackHandler",
    "SageMakerCallbackHandler",
    "LabelStudioCallbackHandler",
    "TrubricsCallbackHandler",
]
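A minimal custom handler following the hierarchy in the module docstring (BaseCallbackHandler --> <name>CallbackHandler); the hook signatures below match BaseCallbackHandler:

.. code-block:: python

    from typing import Any, Dict, List

    from langchain_core.callbacks import BaseCallbackHandler


    class TimingCallbackHandler(BaseCallbackHandler):
        """Log when an LLM call starts and ends."""

        def on_llm_start(
            self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
        ) -> None:
            print(f"LLM started with {len(prompts)} prompt(s)")

        def on_llm_end(self, response: Any, **kwargs: Any) -> None:
            print("LLM finished")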
@@ -0,0 +1,69 @@
from __future__ import annotations

import logging
from contextlib import contextmanager
from contextvars import ContextVar
from typing import (
    Generator,
    Optional,
)

from langchain_core.tracers.context import register_configure_hook

from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.tracers.wandb import WandbTracer

logger = logging.getLogger(__name__)

openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
    "openai_callback", default=None
)
wandb_tracing_callback_var: ContextVar[Optional[WandbTracer]] = ContextVar(  # noqa: E501
    "tracing_wandb_callback", default=None
)

register_configure_hook(openai_callback_var, True)
register_configure_hook(
    wandb_tracing_callback_var, True, WandbTracer, "LANGCHAIN_WANDB_TRACING"
)


@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
    """Get the OpenAI callback handler in a context manager,
    which conveniently exposes token and cost information.

    Returns:
        OpenAICallbackHandler: The OpenAI callback handler.

    Example:
        >>> with get_openai_callback() as cb:
        ...     # Use the OpenAI callback handler
    """
    cb = OpenAICallbackHandler()
    openai_callback_var.set(cb)
    yield cb
    openai_callback_var.set(None)
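The docstring example elides its body; a fuller usage sketch, with an illustrative model choice:

.. code-block:: python

    from langchain.chat_models import ChatOpenAI

    llm = ChatOpenAI(temperature=0)
    with get_openai_callback() as cb:
        # The configure hook registered above routes OpenAI callbacks
        # to cb for everything run inside this block.
        llm.invoke("Tell me a one-line joke.")
        llm.invoke("Now a haiku.")

    print(cb.total_tokens)  # prompt + completion tokens for both calls
    print(cb.total_cost)    # estimated cost in USD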


@contextmanager
def wandb_tracing_enabled(
    session_name: str = "default",
) -> Generator[None, None, None]:
    """Get the WandbTracer in a context manager.

    Args:
        session_name (str, optional): The name of the session.
            Defaults to "default".

    Returns:
        None

    Example:
        >>> with wandb_tracing_enabled() as session:
        ...     # Use the WandbTracer session
    """
    cb = WandbTracer()
    wandb_tracing_callback_var.set(cb)
    yield None
    wandb_tracing_callback_var.set(None)
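And the matching sketch for the W&B tracer (requires the wandb package; calls outside the block are not traced):

.. code-block:: python

    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    with wandb_tracing_enabled():
        llm.invoke("What is 2 + 2?")  # run is traced to Weights & Biases

    llm.invoke("This call is not traced.")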
@@ -0,0 +1,20 @@
"""Tracers that record execution of LangChain runs."""

from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.langchain_v1 import LangChainTracerV1
from langchain_core.tracers.stdout import (
    ConsoleCallbackHandler,
    FunctionCallbackHandler,
)

from langchain_community.callbacks.tracers.logging import LoggingCallbackHandler
from langchain_community.callbacks.tracers.wandb import WandbTracer

__all__ = [
    "ConsoleCallbackHandler",
    "FunctionCallbackHandler",
    "LoggingCallbackHandler",
    "LangChainTracer",
    "LangChainTracerV1",
    "WandbTracer",
]
@@ -0,0 +1,78 @@
"""**Chat Models** are a variation on language models.

While Chat Models use language models under the hood, the interface they expose
is a bit different. Rather than expose a "text in, text out" API, they expose
an interface where "chat messages" are the inputs and outputs.

**Class hierarchy:**

.. code-block::

    BaseLanguageModel --> BaseChatModel --> <name> # Examples: ChatOpenAI, ChatGooglePalm

**Main helpers:**

.. code-block::

    AIMessage, BaseMessage, HumanMessage
""" # noqa: E501

from langchain_community.chat_models.anthropic import ChatAnthropic
from langchain_community.chat_models.anyscale import ChatAnyscale
from langchain_community.chat_models.baichuan import ChatBaichuan
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from langchain_community.chat_models.bedrock import BedrockChat
from langchain_community.chat_models.cohere import ChatCohere
from langchain_community.chat_models.databricks import ChatDatabricks
from langchain_community.chat_models.ernie import ErnieBotChat
from langchain_community.chat_models.everlyai import ChatEverlyAI
from langchain_community.chat_models.fake import FakeListChatModel
from langchain_community.chat_models.fireworks import ChatFireworks
from langchain_community.chat_models.gigachat import GigaChat
from langchain_community.chat_models.google_palm import ChatGooglePalm
from langchain_community.chat_models.human import HumanInputChatModel
from langchain_community.chat_models.hunyuan import ChatHunyuan
from langchain_community.chat_models.javelin_ai_gateway import ChatJavelinAIGateway
from langchain_community.chat_models.jinachat import JinaChat
from langchain_community.chat_models.konko import ChatKonko
from langchain_community.chat_models.litellm import ChatLiteLLM
from langchain_community.chat_models.minimax import MiniMaxChat
from langchain_community.chat_models.mlflow import ChatMlflow
from langchain_community.chat_models.mlflow_ai_gateway import ChatMLflowAIGateway
from langchain_community.chat_models.ollama import ChatOllama
from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint
from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI
from langchain_community.chat_models.vertexai import ChatVertexAI
from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat
from langchain_community.chat_models.yandex import ChatYandexGPT

__all__ = [
    "BedrockChat",
    "FakeListChatModel",
    "PromptLayerChatOpenAI",
    "ChatDatabricks",
    "ChatEverlyAI",
    "ChatAnthropic",
    "ChatCohere",
    "ChatGooglePalm",
    "ChatMlflow",
    "ChatMLflowAIGateway",
    "ChatOllama",
    "ChatVertexAI",
    "JinaChat",
    "HumanInputChatModel",
    "MiniMaxChat",
    "ChatAnyscale",
    "ChatLiteLLM",
    "ErnieBotChat",
    "ChatJavelinAIGateway",
    "ChatKonko",
    "PaiEasChatEndpoint",
    "QianfanChatEndpoint",
    "ChatFireworks",
    "ChatYandexGPT",
    "ChatBaichuan",
    "ChatHunyuan",
    "GigaChat",
    "VolcEngineMaasChat",
]
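A sketch of the "chat messages in, chat messages out" interface the docstring describes, using the dependency-free FakeListChatModel exported above so no API key is needed:

.. code-block:: python

    from langchain_community.chat_models import FakeListChatModel
    from langchain_core.messages import HumanMessage

    chat = FakeListChatModel(responses=["Hello! How can I help?"])
    result = chat.invoke([HumanMessage(content="Hi there")])
    print(result.content)  # -> "Hello! How can I help?"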
@@ -1,11 +1,14 @@
"""Abstract interface for document loader implementations."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional
from typing import Iterator, List, Optional, TYPE_CHECKING

from langchain_core.documents import Document

from langchain_integrations.document_loaders.blob_loaders import Blob
from langchain_integrations.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain_community.document_loaders.blob_loaders import Blob

if TYPE_CHECKING:
    from langchain.text_splitter import TextSplitter


class BaseLoader(ABC):
@@ -37,6 +40,8 @@ class BaseLoader(ABC):
    Returns:
        List of Documents.
    """
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    if text_splitter is None:
        _text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
    else:
@@ -2,7 +2,7 @@
from pathlib import Path
from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union

from langchain_integrations.document_loaders.blob_loaders.schema import Blob, BlobLoader
from langchain_community.document_loaders.blob_loaders.schema import Blob, BlobLoader

T = TypeVar("T")

@@ -43,11 +43,11 @@ class FileSystemBlobLoader(BlobLoader):

    .. code-block:: python

        from langchain_integrations.document_loaders.blob_loaders import FileSystemBlobLoader
        from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader
        loader = FileSystemBlobLoader("/path/to/directory")
        for blob in loader.yield_blobs():
            print(blob)
    """
    """ # noqa: E501

    def __init__(
        self,
@@ -61,7 +61,8 @@ class FileSystemBlobLoader(BlobLoader):
        """Initialize with a path to directory and how to glob over it.

        Args:
            path: Path to directory to load from
            path: Path to directory to load from or path to file to load.
                If a path to a file is provided, glob/exclude/suffixes are ignored.
            glob: Glob pattern relative to the specified path
                by default set to pick up all non-hidden files
            exclude: patterns to exclude from results, use glob syntax
@@ -75,6 +76,10 @@ class FileSystemBlobLoader(BlobLoader):
        Examples:

        .. code-block:: python
            from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader

            # Load a single file.
            loader = FileSystemBlobLoader("/path/to/file.txt")

            # Recursively load all text files in a directory.
            loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt")
@@ -91,7 +96,7 @@ class FileSystemBlobLoader(BlobLoader):
                glob="**/*.txt",
                exclude=["**/*.py", "**/*.pyc"]
            )
        """
        """ # noqa: E501
        if isinstance(path, Path):
            _path = path
        elif isinstance(path, str):
@@ -118,6 +123,10 @@ class FileSystemBlobLoader(BlobLoader):

    def _yield_paths(self) -> Iterable[Path]:
        """Yield paths that match the requested pattern."""
        if self.path.is_file():
            yield self.path
            return

        paths = self.path.glob(self.glob)
        for path in paths:
            if self.exclude:
@@ -1,14 +1,28 @@
from __future__ import annotations

from pathlib import Path
from typing import Iterator, List, Literal, Optional, Sequence, Union
from typing import (
    TYPE_CHECKING,
    Any,
    Iterator,
    List,
    Literal,
    Optional,
    Sequence,
    Union,
)

from langchain_core.documents import Document

from langchain_integrations.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_integrations.document_loaders.blob_loaders import BlobLoader, FileSystemBlobLoader
from langchain_integrations.document_loaders.parsers.registry import get_parser
from langchain_integrations.text_splitter import TextSplitter
from langchain_community.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_community.document_loaders.blob_loaders import (
    BlobLoader,
    FileSystemBlobLoader,
)
from langchain_community.document_loaders.parsers.registry import get_parser

if TYPE_CHECKING:
    from langchain.text_splitter import TextSplitter

_PathLike = Union[str, Path]

@@ -23,47 +37,62 @@ class GenericLoader(BaseLoader):

    Examples:

    .. code-block:: python

        from langchain_integrations.document_loaders import GenericLoader
        from langchain_integrations.document_loaders.blob_loaders import FileSystemBlobLoader

        loader = GenericLoader.from_filesystem(
            path="path/to/directory",
            glob="**/[!.]*",
            suffixes=[".pdf"],
            show_progress=True,
        )

        docs = loader.lazy_load()
        next(docs)

    Example instantiations to change which files are loaded:
    Parse a specific PDF file:

    .. code-block:: python

        # Recursively load all text files in a directory.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/*.txt")

        # Recursively load all non-hidden files in a directory.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/[!.]*")

        # Load all files in a directory without recursion.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="*")

    Example instantiations to change which parser is used:

    .. code-block:: python

        from langchain_integrations.document_loaders.parsers.pdf import PyPDFParser
        from langchain_community.document_loaders import GenericLoader
        from langchain_community.document_loaders.parsers.pdf import PyPDFParser

        # Recursively load all text files in a directory.
        loader = GenericLoader.from_filesystem(
            "/path/to/dir",
            glob="**/*.pdf",
            "my_lovely_pdf.pdf",
            parser=PyPDFParser()
        )
    """

    .. code-block:: python

        from langchain_community.document_loaders import GenericLoader
        from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader


        loader = GenericLoader.from_filesystem(
            path="path/to/directory",
            glob="**/[!.]*",
            suffixes=[".pdf"],
            show_progress=True,
        )

        docs = loader.lazy_load()
        next(docs)

    Example instantiations to change which files are loaded:

    .. code-block:: python

        # Recursively load all text files in a directory.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/*.txt")

        # Recursively load all non-hidden files in a directory.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/[!.]*")

        # Load all files in a directory without recursion.
        loader = GenericLoader.from_filesystem("/path/to/dir", glob="*")

    Example instantiations to change which parser is used:

    .. code-block:: python

        from langchain_community.document_loaders.parsers.pdf import PyPDFParser

        # Recursively load all text files in a directory.
        loader = GenericLoader.from_filesystem(
            "/path/to/dir",
            glob="**/*.pdf",
            parser=PyPDFParser()
        )

    """ # noqa: E501

    def __init__(
        self,
@@ -110,18 +139,26 @@ class GenericLoader(BaseLoader):
        suffixes: Optional[Sequence[str]] = None,
        show_progress: bool = False,
        parser: Union[DEFAULT, BaseBlobParser] = "default",
        parser_kwargs: Optional[dict] = None,
    ) -> GenericLoader:
        """Create a generic document loader using a filesystem blob loader.

        Args:
            path: The path to the directory to load documents from.
            path: The path to the directory to load documents from OR the path to a
                single file to load. If this is a file, glob, exclude, suffixes
                will be ignored.
            glob: The glob pattern to use to find documents.
            suffixes: The suffixes to use to filter documents. If None, all files
                matching the glob will be loaded.
            exclude: A list of patterns to exclude from the loader.
            show_progress: Whether to show a progress bar or not (requires tqdm).
                Proxies to the file system loader.
            parser: A blob parser which knows how to parse blobs into documents
            parser: A blob parser which knows how to parse blobs into documents,
                will instantiate a default parser if not provided.
                The default can be overridden by either passing a parser or
                setting the class attribute `blob_parser` (the latter
                should be used with inheritance).
            parser_kwargs: Keyword arguments to pass to the parser.

        Returns:
            A generic document loader.
@@ -134,7 +171,20 @@ class GenericLoader(BaseLoader):
            show_progress=show_progress,
        )
        if isinstance(parser, str):
            blob_parser = get_parser(parser)
            if parser == "default":
                try:
                    # If there is an implementation of get_parser on the class, use it.
                    blob_parser = cls.get_parser(**(parser_kwargs or {}))
                except NotImplementedError:
                    # if not then use the global registry.
                    blob_parser = get_parser(parser)
            else:
                blob_parser = get_parser(parser)
        else:
            blob_parser = parser
        return cls(blob_loader, blob_parser)

    @staticmethod
    def get_parser(**kwargs: Any) -> BaseBlobParser:
        """Override this method to associate a default parser with the class."""
        raise NotImplementedError()
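A sketch of the new extension point added in this hunk: a subclass pins its default parser by overriding get_parser, and from_filesystem(parser="default") picks it up before consulting the global registry. MyPdfLoader is hypothetical:

.. code-block:: python

    from typing import Any

    from langchain_community.document_loaders.base import BaseBlobParser
    from langchain_community.document_loaders.generic import GenericLoader
    from langchain_community.document_loaders.parsers.pdf import PyPDFParser


    class MyPdfLoader(GenericLoader):
        @staticmethod
        def get_parser(**kwargs: Any) -> BaseBlobParser:
            # Consulted by from_filesystem before the parser registry.
            return PyPDFParser(**kwargs)


    loader = MyPdfLoader.from_filesystem("/path/to/dir", glob="**/*.pdf")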
@@ -6,8 +6,8 @@ from typing import Iterator, Mapping, Optional

from langchain_core.documents import Document

from langchain_integrations.document_loaders.base import BaseBlobParser
from langchain_integrations.document_loaders.blob_loaders.schema import Blob
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders.schema import Blob


class MimeTypeBasedParser(BaseBlobParser):
@@ -22,7 +22,7 @@ class MimeTypeBasedParser(BaseBlobParser):

    .. code-block:: python

        from langchain_integrations.document_loaders.parsers.generic import MimeTypeBasedParser
        from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser

        parser = MimeTypeBasedParser(
            handlers={
@@ -30,7 +30,7 @@ class MimeTypeBasedParser(BaseBlobParser):
            },
            fallback_parser=...,
        )
    """
    """ # noqa: E501

    def __init__(
        self,
@@ -1,25 +1,36 @@
|
||||
from typing import Any, Dict, Iterator, Optional
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, Iterator, Optional, TYPE_CHECKING
|
||||
|
||||
from langchain_core.documents import Document
|
||||
|
||||
from langchain_integrations.document_loaders.base import BaseBlobParser
|
||||
from langchain_integrations.document_loaders.blob_loaders import Blob
|
||||
from langchain_integrations.document_loaders.parsers.language.cobol import CobolSegmenter
|
||||
from langchain_integrations.document_loaders.parsers.language.javascript import JavaScriptSegmenter
|
||||
from langchain_integrations.document_loaders.parsers.language.python import PythonSegmenter
|
||||
from langchain_integrations.text_splitter import Language
|
||||
from langchain_community.document_loaders.base import BaseBlobParser
|
||||
from langchain_community.document_loaders.blob_loaders import Blob
|
||||
from langchain_community.document_loaders.parsers.language.cobol import CobolSegmenter
|
||||
from langchain_community.document_loaders.parsers.language.javascript import (
|
||||
JavaScriptSegmenter,
|
||||
)
|
||||
from langchain_community.document_loaders.parsers.language.python import PythonSegmenter
|
||||
|
||||
LANGUAGE_EXTENSIONS: Dict[str, str] = {
|
||||
"py": Language.PYTHON,
|
||||
"js": Language.JS,
|
||||
"cobol": Language.COBOL,
|
||||
}
|
||||
if TYPE_CHECKING:
|
||||
from langchain.text_splitter import Language
|
||||
|
||||
LANGUAGE_SEGMENTERS: Dict[str, Any] = {
|
||||
Language.PYTHON: PythonSegmenter,
|
||||
Language.JS: JavaScriptSegmenter,
|
||||
Language.COBOL: CobolSegmenter,
|
||||
}
|
||||
try:
|
||||
from langchain.text_splitter import Language
|
||||
LANGUAGE_EXTENSIONS: Dict[str, str] = {
|
||||
"py": Language.PYTHON,
|
||||
"js": Language.JS,
|
||||
"cobol": Language.COBOL,
|
||||
}
|
||||
|
||||
LANGUAGE_SEGMENTERS: Dict[str, Any] = {
|
||||
Language.PYTHON: PythonSegmenter,
|
||||
Language.JS: JavaScriptSegmenter,
|
||||
Language.COBOL: CobolSegmenter,
|
||||
}
|
||||
except ImportError:
|
||||
LANGUAGE_EXTENSIONS = {}
|
||||
LANGUAGE_SEGMENTERS = {}
|
||||
|
||||
|
||||
class LanguageParser(BaseBlobParser):
|
||||
@@ -40,9 +51,9 @@ class LanguageParser(BaseBlobParser):
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_integrations.text_splitter.Language
|
||||
from langchain_integrations.document_loaders.generic import GenericLoader
|
||||
from langchain_integrations.document_loaders.parsers import LanguageParser
|
||||
from langchain.text_splitter.Language
|
||||
from langchain_community.document_loaders.generic import GenericLoader
|
||||
from langchain_community.document_loaders.parsers import LanguageParser
|
||||
|
||||
loader = GenericLoader.from_filesystem(
|
||||
"./code",
|
||||
@@ -56,7 +67,7 @@ class LanguageParser(BaseBlobParser):
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_integrations.text_splitter import Language
|
||||
from langchain.text_splitter import Language
|
||||
|
||||
loader = GenericLoader.from_filesystem(
|
||||
"./code",
|
||||
@@ -7,8 +7,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Union

from langchain_core.documents import Document

from langchain_integrations.document_loaders.base import BaseLoader
from langchain_integrations.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders.base import BaseLoader

if TYPE_CHECKING:
    import pandas as pd
@@ -49,6 +48,7 @@ class TelegramChatFileLoader(BaseLoader):

def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
    """Convert a string or list of strings to a list of Documents with metadata."""
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    if isinstance(text, str):
        # Take a single string as one page
        text = [text]
@@ -8,10 +8,12 @@ class BeautifulSoupTransformer(BaseDocumentTransformer):

    Example:
        .. code-block:: python
            from langchain_integrations.document_transformers import BeautifulSoupTransformer

            from langchain_community.document_transformers import BeautifulSoupTransformer

            bs4_transformer = BeautifulSoupTransformer()
            docs_transformed = bs4_transformer.transform_documents(docs)
    """
    """ # noqa: E501

    def __init__(self) -> None:
        """
@@ -0,0 +1,140 @@
"""Document transformers that use OpenAI Functions models"""
from typing import Any, Dict, Optional, Sequence, Type, Union

from langchain_core.documents import BaseDocumentTransformer, Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel


class OpenAIMetadataTagger(BaseDocumentTransformer, BaseModel):
    """Extract metadata tags from document contents using OpenAI functions.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import ChatOpenAI
            from langchain_community.document_transformers import OpenAIMetadataTagger
            from langchain_core.documents import Document

            schema = {
                "properties": {
                    "movie_title": { "type": "string" },
                    "critic": { "type": "string" },
                    "tone": {
                        "type": "string",
                        "enum": ["positive", "negative"]
                    },
                    "rating": {
                        "type": "integer",
                        "description": "The number of stars the critic rated the movie"
                    }
                },
                "required": ["movie_title", "critic", "tone"]
            }

            # Must be an OpenAI model that supports functions
            llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
            tagging_chain = create_tagging_chain(schema, llm)
            document_transformer = OpenAIMetadataTagger(tagging_chain=tagging_chain)
            original_documents = [
                Document(page_content="Review of The Bee Movie\nBy Roger Ebert\n\nThis is the greatest movie ever made. 4 out of 5 stars."),
                Document(page_content="Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars.", metadata={"reliable": False}),
            ]

            enhanced_documents = document_transformer.transform_documents(original_documents)
    """ # noqa: E501

    tagging_chain: Any
    """The chain used to extract metadata from each document."""

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        """Automatically extract and populate metadata
        for each document according to the provided schema."""

        new_documents = []

        for document in documents:
            extracted_metadata: Dict = self.tagging_chain.run(document.page_content) # type: ignore[assignment] # noqa: E501
            new_document = Document(
                page_content=document.page_content,
                metadata={**extracted_metadata, **document.metadata},
            )
            new_documents.append(new_document)
        return new_documents

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        raise NotImplementedError


def create_metadata_tagger(
    metadata_schema: Union[Dict[str, Any], Type[BaseModel]],
    llm: BaseLanguageModel,
    prompt: Optional[ChatPromptTemplate] = None,
    *,
    tagging_chain_kwargs: Optional[Dict] = None,
) -> OpenAIMetadataTagger:
    """Create a DocumentTransformer that uses an OpenAI function chain to automatically
    tag documents with metadata based on their content and an input schema.

    Args:
        metadata_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary
            is passed in, it's assumed to already be a valid JsonSchema.
            For best results, pydantic.BaseModels should have docstrings describing what
            the schema represents and descriptions for the parameters.
        llm: Language model to use, assumed to support the OpenAI function-calling API.
            Defaults to use "gpt-3.5-turbo-0613"
        prompt: BasePromptTemplate to pass to the model.

    Returns:
        An OpenAIMetadataTagger that will tag incoming documents with metadata.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import ChatOpenAI
            from langchain_community.document_transformers import create_metadata_tagger
            from langchain_core.documents import Document

            schema = {
                "properties": {
                    "movie_title": { "type": "string" },
                    "critic": { "type": "string" },
                    "tone": {
                        "type": "string",
                        "enum": ["positive", "negative"]
                    },
                    "rating": {
                        "type": "integer",
                        "description": "The number of stars the critic rated the movie"
                    }
                },
                "required": ["movie_title", "critic", "tone"]
            }

            # Must be an OpenAI model that supports functions
            llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")

            document_transformer = create_metadata_tagger(schema, llm)
            original_documents = [
                Document(page_content="Review of The Bee Movie\nBy Roger Ebert\n\nThis is the greatest movie ever made. 4 out of 5 stars."),
                Document(page_content="Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars.", metadata={"reliable": False}),
            ]

            enhanced_documents = document_transformer.transform_documents(original_documents)
    """ # noqa: E501
    from langchain.chains.openai_functions import create_tagging_chain
    metadata_schema = (
        metadata_schema
        if isinstance(metadata_schema, dict)
        else metadata_schema.schema()
    )
    _tagging_chain_kwargs = tagging_chain_kwargs or {}
    tagging_chain = create_tagging_chain(
        metadata_schema, llm, prompt=prompt, **_tagging_chain_kwargs
    )
    return OpenAIMetadataTagger(tagging_chain=tagging_chain)
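create_metadata_tagger also accepts a pydantic model in place of a raw JsonSchema dict; its .schema() is taken internally. A sketch of that path (the model is illustrative):

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain_core.pydantic_v1 import BaseModel, Field


    class ReviewTags(BaseModel):
        """Tags describing a movie review."""

        movie_title: str
        critic: str
        tone: str = Field(description="positive or negative")


    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
    document_transformer = create_metadata_tagger(ReviewTags, llm)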
@@ -0,0 +1,160 @@
"""**Embedding models** are wrappers around embedding models
from different APIs and services.

**Embedding models** can be LLMs or not.

**Class hierarchy:**

.. code-block::

    Embeddings --> <name>Embeddings # Examples: CohereEmbeddings, HuggingFaceEmbeddings
"""


import logging
from typing import Any


from langchain_community.embeddings.aleph_alpha import (
    AlephAlphaAsymmetricSemanticEmbedding,
    AlephAlphaSymmetricSemanticEmbedding,
)
from langchain_community.embeddings.awa import AwaEmbeddings
from langchain_community.embeddings.baidu_qianfan_endpoint import (
    QianfanEmbeddingsEndpoint,
)
from langchain_community.embeddings.bedrock import BedrockEmbeddings
from langchain_community.embeddings.bookend import BookendEmbeddings
from langchain_community.embeddings.cache import CacheBackedEmbeddings
from langchain_community.embeddings.clarifai import ClarifaiEmbeddings
from langchain_community.embeddings.cohere import CohereEmbeddings
from langchain_community.embeddings.dashscope import DashScopeEmbeddings
from langchain_community.embeddings.databricks import DatabricksEmbeddings
from langchain_community.embeddings.deepinfra import DeepInfraEmbeddings
from langchain_community.embeddings.edenai import EdenAiEmbeddings
from langchain_community.embeddings.elasticsearch import ElasticsearchEmbeddings
from langchain_community.embeddings.embaas import EmbaasEmbeddings
from langchain_community.embeddings.ernie import ErnieEmbeddings
from langchain_community.embeddings.fake import (
    DeterministicFakeEmbedding,
    FakeEmbeddings,
)
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_community.embeddings.google_palm import GooglePalmEmbeddings
from langchain_community.embeddings.gpt4all import GPT4AllEmbeddings
from langchain_community.embeddings.gradient_ai import GradientEmbeddings
from langchain_community.embeddings.huggingface import (
    HuggingFaceBgeEmbeddings,
    HuggingFaceEmbeddings,
    HuggingFaceInferenceAPIEmbeddings,
    HuggingFaceInstructEmbeddings,
)
from langchain_community.embeddings.huggingface_hub import HuggingFaceHubEmbeddings
from langchain_community.embeddings.infinity import InfinityEmbeddings
from langchain_community.embeddings.javelin_ai_gateway import JavelinAIGatewayEmbeddings
from langchain_community.embeddings.jina import JinaEmbeddings
from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
from langchain_community.embeddings.llamacpp import LlamaCppEmbeddings
from langchain_community.embeddings.localai import LocalAIEmbeddings
from langchain_community.embeddings.minimax import MiniMaxEmbeddings
from langchain_community.embeddings.mlflow import MlflowEmbeddings
from langchain_community.embeddings.mlflow_gateway import MlflowAIGatewayEmbeddings
from langchain_community.embeddings.modelscope_hub import ModelScopeEmbeddings
from langchain_community.embeddings.mosaicml import MosaicMLInstructorEmbeddings
from langchain_community.embeddings.nlpcloud import NLPCloudEmbeddings
from langchain_community.embeddings.octoai_embeddings import OctoAIEmbeddings
from langchain_community.embeddings.ollama import OllamaEmbeddings
from langchain_community.embeddings.sagemaker_endpoint import (
    SagemakerEndpointEmbeddings,
)
from langchain_community.embeddings.self_hosted import SelfHostedEmbeddings
from langchain_community.embeddings.self_hosted_hugging_face import (
    SelfHostedHuggingFaceEmbeddings,
    SelfHostedHuggingFaceInstructEmbeddings,
)
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings,
)
from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings
from langchain_community.embeddings.tensorflow_hub import TensorflowHubEmbeddings
from langchain_community.embeddings.vertexai import VertexAIEmbeddings
from langchain_community.embeddings.voyageai import VoyageEmbeddings
from langchain_community.embeddings.xinference import XinferenceEmbeddings

logger = logging.getLogger(__name__)

__all__ = [
    "CacheBackedEmbeddings",
    "ClarifaiEmbeddings",
    "CohereEmbeddings",
    "DatabricksEmbeddings",
    "ElasticsearchEmbeddings",
    "FastEmbedEmbeddings",
    "HuggingFaceEmbeddings",
    "HuggingFaceInferenceAPIEmbeddings",
    "InfinityEmbeddings",
    "GradientEmbeddings",
    "JinaEmbeddings",
    "LlamaCppEmbeddings",
    "HuggingFaceHubEmbeddings",
    "MlflowEmbeddings",
    "MlflowAIGatewayEmbeddings",
    "ModelScopeEmbeddings",
    "TensorflowHubEmbeddings",
    "SagemakerEndpointEmbeddings",
    "HuggingFaceInstructEmbeddings",
    "MosaicMLInstructorEmbeddings",
    "SelfHostedEmbeddings",
    "SelfHostedHuggingFaceEmbeddings",
    "SelfHostedHuggingFaceInstructEmbeddings",
    "FakeEmbeddings",
    "DeterministicFakeEmbedding",
    "AlephAlphaAsymmetricSemanticEmbedding",
    "AlephAlphaSymmetricSemanticEmbedding",
    "SentenceTransformerEmbeddings",
    "GooglePalmEmbeddings",
    "MiniMaxEmbeddings",
    "VertexAIEmbeddings",
    "BedrockEmbeddings",
    "DeepInfraEmbeddings",
    "EdenAiEmbeddings",
    "DashScopeEmbeddings",
    "EmbaasEmbeddings",
    "OctoAIEmbeddings",
    "SpacyEmbeddings",
    "NLPCloudEmbeddings",
    "GPT4AllEmbeddings",
    "XinferenceEmbeddings",
    "LocalAIEmbeddings",
    "AwaEmbeddings",
    "HuggingFaceBgeEmbeddings",
    "ErnieEmbeddings",
    "JavelinAIGatewayEmbeddings",
    "OllamaEmbeddings",
    "QianfanEmbeddingsEndpoint",
    "JohnSnowLabsEmbeddings",
    "VoyageEmbeddings",
    "BookendEmbeddings",
]


# TODO: this is in here to maintain backwards compatibility
class HypotheticalDocumentEmbedder:
    def __init__(self, *args: Any, **kwargs: Any):
        logger.warning(
            "Using a deprecated class. Please use "
            "`from langchain.chains import HypotheticalDocumentEmbedder` instead"
        )
        from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H

        return H(*args, **kwargs) # type: ignore

    @classmethod
    def from_llm(cls, *args: Any, **kwargs: Any) -> Any:
        logger.warning(
            "Using a deprecated class. Please use "
            "`from langchain.chains import HypotheticalDocumentEmbedder` instead"
        )
        from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H

        return H.from_llm(*args, **kwargs)
@@ -15,9 +15,9 @@ from functools import partial
from typing import Callable, List, Sequence, Union, cast

from langchain_core.embeddings import Embeddings
from langchain_core.stores import BaseStore
from langchain_core.stores import BaseStore, ByteStore

from langchain_integrations.storage.encoder_backed import EncoderBackedStore
from langchain_community.storage.encoder_backed import EncoderBackedStore

NAMESPACE_UUID = uuid.UUID(int=1985)

@@ -62,8 +62,8 @@ class CacheBackedEmbeddings(Embeddings):

    .. code-block: python

        from langchain_integrations.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings
        from langchain_integrations.storage import LocalFileStore
        from langchain_community.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings
        from langchain_community.storage import LocalFileStore

        store = LocalFileStore('./my_cache')

@@ -77,7 +77,7 @@ class CacheBackedEmbeddings(Embeddings):

        # Embeddings are retrieved from the cache, no computation is done
        embeddings = embedder.embed_documents(["hello", "goodbye"])
    """
    """ # noqa: E501

    def __init__(
        self,
@@ -151,7 +151,7 @@ class CacheBackedEmbeddings(Embeddings):
    def from_bytes_store(
        cls,
        underlying_embeddings: Embeddings,
        document_embedding_cache: BaseStore[str, bytes],
        document_embedding_cache: ByteStore,
        *,
        namespace: str = "",
    ) -> CacheBackedEmbeddings:
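A usage sketch for the changed signature; ByteStore is the alias for BaseStore[str, bytes], so any byte store qualifies. FakeEmbeddings keeps the example key-free, and InMemoryByteStore is assumed to be available in this release:

.. code-block:: python

    from langchain.embeddings import CacheBackedEmbeddings
    from langchain.storage import InMemoryByteStore  # assumed import path
    from langchain_community.embeddings import FakeEmbeddings

    store = InMemoryByteStore()
    embedder = CacheBackedEmbeddings.from_bytes_store(
        FakeEmbeddings(size=32), store, namespace="fake-32"
    )
    first = embedder.embed_documents(["hello", "goodbye"])   # computed, then cached
    second = embedder.embed_documents(["hello", "goodbye"])  # served from the cache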
@@ -25,7 +25,7 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
    Example:
        .. code-block:: python

            from langchain_integrations.embeddings import HuggingFaceEmbeddings
            from langchain_community.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
@@ -113,7 +113,7 @@ class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
    Example:
        .. code-block:: python

            from langchain_integrations.embeddings import HuggingFaceInstructEmbeddings
            from langchain_community.embeddings import HuggingFaceInstructEmbeddings

            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
@@ -192,7 +192,7 @@ class HuggingFaceBgeEmbeddings(BaseModel, Embeddings):
    Example:
        .. code-block:: python

            from langchain_integrations.embeddings import HuggingFaceBgeEmbeddings
            from langchain_community.embeddings import HuggingFaceBgeEmbeddings

            model_name = "BAAI/bge-large-en"
            model_kwargs = {'device': 'cpu'}
@@ -279,9 +279,15 @@ class HuggingFaceInferenceAPIEmbeddings(BaseModel, Embeddings):
    """Your API key for the HuggingFace Inference API."""
    model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
    """The name of the model to use for text embeddings."""
    api_url: Optional[str] = None
    """Custom inference endpoint url. None for using default public url."""

    @property
    def _api_url(self) -> str:
        return self.api_url or self._default_api_url

    @property
    def _default_api_url(self) -> str:
        return (
            "https://api-inference.huggingface.co"
            "/pipeline"
@@ -306,7 +312,7 @@ class HuggingFaceInferenceAPIEmbeddings(BaseModel, Embeddings):
    Example:
        .. code-block:: python

            from langchain_integrations.embeddings import HuggingFaceInferenceAPIEmbeddings
            from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings

            hf_embeddings = HuggingFaceInferenceAPIEmbeddings(
                api_key="your_api_key",
@@ -314,7 +320,7 @@ class HuggingFaceInferenceAPIEmbeddings(BaseModel, Embeddings):
            )
            texts = ["Hello, world!", "How are you?"]
            hf_embeddings.embed_documents(texts)
        """
        """ # noqa: E501
        response = requests.post(
            self._api_url,
            headers=self._headers,
@@ -2,10 +2,9 @@ import os
import sys
from typing import Any, List

from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Extra

from langchain_integrations.embeddings.base import Embeddings


class JohnSnowLabsEmbeddings(BaseModel, Embeddings):
    """JohnSnowLabs embedding models
@@ -14,11 +13,11 @@ class JohnSnowLabsEmbeddings(BaseModel, Embeddings):
    Example:
        .. code-block:: python

            from langchain_integrations.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
            from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings

            embedding = JohnSnowLabsEmbeddings(model='embed_sentence.bert')
            output = embedding.embed_query("foo bar")
    """
    """ # noqa: E501

    model: Any = "embed_sentence.bert"

@@ -2,7 +2,7 @@ import importlib
import logging
from typing import Any, Callable, List, Optional

from langchain_integrations.embeddings.self_hosted import SelfHostedEmbeddings
from langchain_community.embeddings.self_hosted import SelfHostedEmbeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
@@ -69,7 +69,7 @@ class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
    Example:
        .. code-block:: python

            from langchain_integrations.embeddings import SelfHostedHuggingFaceEmbeddings
            from langchain_community.embeddings import SelfHostedHuggingFaceEmbeddings
            import runhouse as rh
            model_name = "sentence-transformers/all-mpnet-base-v2"
            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
@@ -112,13 +112,13 @@ class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
    Example:
        .. code-block:: python

            from langchain_integrations.embeddings import SelfHostedHuggingFaceInstructEmbeddings
            from langchain_community.embeddings import SelfHostedHuggingFaceInstructEmbeddings
            import runhouse as rh
            model_name = "hkunlp/instructor-large"
            gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
            hf = SelfHostedHuggingFaceInstructEmbeddings(
                model_name=model_name, hardware=gpu)
    """
    """ # noqa: E501

    model_id: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
@@ -6,7 +6,7 @@ access to the large language model (**LLM**) APIs and services.

.. code-block::

    BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
    BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub

**Main helpers:**

@@ -19,227 +19,227 @@ access to the large language model (**LLM**) APIs and services.
""" # noqa: E501
from typing import Any, Callable, Dict, Type

from langchain_integrations.llms.base import BaseLLM
from langchain_core.language_models.llms import BaseLLM


def _import_ai21() -> Any:
    from langchain_integrations.llms.ai21 import AI21
    from langchain_community.llms.ai21 import AI21

    return AI21


def _import_aleph_alpha() -> Any:
    from langchain_integrations.llms.aleph_alpha import AlephAlpha
    from langchain_community.llms.aleph_alpha import AlephAlpha

    return AlephAlpha


def _import_amazon_api_gateway() -> Any:
    from langchain_integrations.llms.amazon_api_gateway import AmazonAPIGateway
    from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway

    return AmazonAPIGateway


def _import_anthropic() -> Any:
    from langchain_integrations.llms.anthropic import Anthropic
    from langchain_community.llms.anthropic import Anthropic

    return Anthropic


def _import_anyscale() -> Any:
    from langchain_integrations.llms.anyscale import Anyscale
    from langchain_community.llms.anyscale import Anyscale

    return Anyscale


def _import_arcee() -> Any:
    from langchain_integrations.llms.arcee import Arcee
    from langchain_community.llms.arcee import Arcee

    return Arcee


def _import_aviary() -> Any:
    from langchain_integrations.llms.aviary import Aviary
    from langchain_community.llms.aviary import Aviary

    return Aviary


def _import_azureml_endpoint() -> Any:
    from langchain_integrations.llms.azureml_endpoint import AzureMLOnlineEndpoint
    from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint

    return AzureMLOnlineEndpoint


def _import_baidu_qianfan_endpoint() -> Any:
    from langchain_integrations.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
    from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint

    return QianfanLLMEndpoint


def _import_bananadev() -> Any:
    from langchain_integrations.llms.bananadev import Banana
    from langchain_community.llms.bananadev import Banana

    return Banana


def _import_baseten() -> Any:
    from langchain_integrations.llms.baseten import Baseten
    from langchain_community.llms.baseten import Baseten

    return Baseten


def _import_beam() -> Any:
    from langchain_integrations.llms.beam import Beam
    from langchain_community.llms.beam import Beam

    return Beam


def _import_bedrock() -> Any:
    from langchain_integrations.llms.bedrock import Bedrock
    from langchain_community.llms.bedrock import Bedrock

    return Bedrock


def _import_bittensor() -> Any:
    from langchain_integrations.llms.bittensor import NIBittensorLLM
    from langchain_community.llms.bittensor import NIBittensorLLM

    return NIBittensorLLM


def _import_cerebriumai() -> Any:
    from langchain_integrations.llms.cerebriumai import CerebriumAI
    from langchain_community.llms.cerebriumai import CerebriumAI

    return CerebriumAI


def _import_chatglm() -> Any:
    from langchain_integrations.llms.chatglm import ChatGLM
    from langchain_community.llms.chatglm import ChatGLM

    return ChatGLM


def _import_clarifai() -> Any:
    from langchain_integrations.llms.clarifai import Clarifai
    from langchain_community.llms.clarifai import Clarifai

    return Clarifai


def _import_cohere() -> Any:
    from langchain_integrations.llms.cohere import Cohere
    from langchain_community.llms.cohere import Cohere

    return Cohere


def _import_ctransformers() -> Any:
    from langchain_integrations.llms.ctransformers import CTransformers
    from langchain_community.llms.ctransformers import CTransformers

    return CTransformers


def _import_ctranslate2() -> Any:
    from langchain_integrations.llms.ctranslate2 import CTranslate2
    from langchain_community.llms.ctranslate2 import CTranslate2

    return CTranslate2


def _import_databricks() -> Any:
    from langchain_integrations.llms.databricks import Databricks
    from langchain_community.llms.databricks import Databricks

    return Databricks


def _import_databricks_chat() -> Any:
    from langchain_integrations.chat_models.databricks import ChatDatabricks
    from langchain_community.chat_models.databricks import ChatDatabricks

    return ChatDatabricks


def _import_deepinfra() -> Any:
    from langchain_integrations.llms.deepinfra import DeepInfra
    from langchain_community.llms.deepinfra import DeepInfra

    return DeepInfra


def _import_deepsparse() -> Any:
    from langchain_integrations.llms.deepsparse import DeepSparse
    from langchain_community.llms.deepsparse import DeepSparse

    return DeepSparse


def _import_edenai() -> Any:
    from langchain_integrations.llms.edenai import EdenAI
    from langchain_community.llms.edenai import EdenAI

    return EdenAI


def _import_fake() -> Any:
    from langchain_integrations.llms.fake import FakeListLLM
    from langchain_community.llms.fake import FakeListLLM

    return FakeListLLM


def _import_fireworks() -> Any:
    from langchain_integrations.llms.fireworks import Fireworks
    from langchain_community.llms.fireworks import Fireworks

    return Fireworks


def _import_forefrontai() -> Any:
    from langchain_integrations.llms.forefrontai import ForefrontAI
    from langchain_community.llms.forefrontai import ForefrontAI

    return ForefrontAI


def _import_gigachat() -> Any:
    from langchain_integrations.llms.gigachat import GigaChat
    from langchain_community.llms.gigachat import GigaChat

    return GigaChat


def _import_google_palm() -> Any:
    from langchain_integrations.llms.google_palm import GooglePalm
    from langchain_community.llms.google_palm import GooglePalm

    return GooglePalm


def _import_gooseai() -> Any:
    from langchain_integrations.llms.gooseai import GooseAI
    from langchain_community.llms.gooseai import GooseAI

    return GooseAI


def _import_gpt4all() -> Any:
    from langchain_integrations.llms.gpt4all import GPT4All
    from langchain_community.llms.gpt4all import GPT4All

    return GPT4All
||||
|
||||
|
||||
def _import_gradient_ai() -> Any:
|
||||
from langchain_integrations.llms.gradient_ai import GradientLLM
|
||||
from langchain_community.llms.gradient_ai import GradientLLM
|
||||
|
||||
return GradientLLM
|
||||
|
||||
|
||||
def _import_huggingface_endpoint() -> Any:
|
||||
from langchain_integrations.llms.huggingface_endpoint import HuggingFaceEndpoint
|
||||
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
|
||||
|
||||
return HuggingFaceEndpoint
|
||||
|
||||
|
||||
def _import_huggingface_hub() -> Any:
|
||||
from langchain_integrations.llms.huggingface_hub import HuggingFaceHub
|
||||
from langchain_community.llms.huggingface_hub import HuggingFaceHub
|
||||
|
||||
return HuggingFaceHub
|
||||
|
||||
|
||||
def _import_huggingface_pipeline() -> Any:
|
||||
from langchain_integrations.llms.huggingface_pipeline import HuggingFacePipeline
|
||||
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
|
||||
|
||||
return HuggingFacePipeline
|
||||
|
||||
|
||||
def _import_huggingface_text_gen_inference() -> Any:
|
||||
from langchain_integrations.llms.huggingface_text_gen_inference import (
|
||||
from langchain_community.llms.huggingface_text_gen_inference import (
|
||||
HuggingFaceTextGenInference,
|
||||
)
|
||||
|
||||
@@ -247,283 +247,273 @@ def _import_huggingface_text_gen_inference() -> Any:
|
||||
|
||||
|
||||
def _import_human() -> Any:
|
||||
from langchain_integrations.llms.human import HumanInputLLM
|
||||
from langchain_community.llms.human import HumanInputLLM
|
||||
|
||||
return HumanInputLLM
|
||||
|
||||
|
||||
def _import_javelin_ai_gateway() -> Any:
|
||||
from langchain_integrations.llms.javelin_ai_gateway import JavelinAIGateway
|
||||
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
|
||||
|
||||
return JavelinAIGateway
|
||||
|
||||
|
||||
def _import_koboldai() -> Any:
|
||||
from langchain_integrations.llms.koboldai import KoboldApiLLM
|
||||
from langchain_community.llms.koboldai import KoboldApiLLM
|
||||
|
||||
return KoboldApiLLM
|
||||
|
||||
|
||||
def _import_llamacpp() -> Any:
|
||||
from langchain_integrations.llms.llamacpp import LlamaCpp
|
||||
from langchain_community.llms.llamacpp import LlamaCpp
|
||||
|
||||
return LlamaCpp
|
||||
|
||||
|
||||
def _import_manifest() -> Any:
|
||||
from langchain_integrations.llms.manifest import ManifestWrapper
|
||||
from langchain_community.llms.manifest import ManifestWrapper
|
||||
|
||||
return ManifestWrapper
|
||||
|
||||
|
||||
def _import_minimax() -> Any:
|
||||
from langchain_integrations.llms.minimax import Minimax
|
||||
from langchain_community.llms.minimax import Minimax
|
||||
|
||||
return Minimax
|
||||
|
||||
|
||||
def _import_mlflow() -> Any:
|
||||
from langchain_integrations.llms.mlflow import Mlflow
|
||||
from langchain_community.llms.mlflow import Mlflow
|
||||
|
||||
return Mlflow
|
||||
|
||||
|
||||
def _import_mlflow_chat() -> Any:
|
||||
from langchain_integrations.chat_models.mlflow import ChatMlflow
|
||||
from langchain_community.chat_models.mlflow import ChatMlflow
|
||||
|
||||
return ChatMlflow
|
||||
|
||||
|
||||
def _import_mlflow_ai_gateway() -> Any:
|
||||
from langchain_integrations.llms.mlflow_ai_gateway import MlflowAIGateway
|
||||
from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
|
||||
|
||||
return MlflowAIGateway
|
||||
|
||||
|
||||
def _import_modal() -> Any:
|
||||
from langchain_integrations.llms.modal import Modal
|
||||
from langchain_community.llms.modal import Modal
|
||||
|
||||
return Modal
|
||||
|
||||
|
||||
def _import_mosaicml() -> Any:
|
||||
from langchain_integrations.llms.mosaicml import MosaicML
|
||||
from langchain_community.llms.mosaicml import MosaicML
|
||||
|
||||
return MosaicML
|
||||
|
||||
|
||||
def _import_nlpcloud() -> Any:
|
||||
from langchain_integrations.llms.nlpcloud import NLPCloud
|
||||
from langchain_community.llms.nlpcloud import NLPCloud
|
||||
|
||||
return NLPCloud
|
||||
|
||||
|
||||
def _import_octoai_endpoint() -> Any:
|
||||
from langchain_integrations.llms.octoai_endpoint import OctoAIEndpoint
|
||||
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
|
||||
|
||||
return OctoAIEndpoint
|
||||
|
||||
|
||||
def _import_ollama() -> Any:
|
||||
from langchain_integrations.llms.ollama import Ollama
|
||||
from langchain_community.llms.ollama import Ollama
|
||||
|
||||
return Ollama
|
||||
|
||||
|
||||
def _import_opaqueprompts() -> Any:
|
||||
from langchain_integrations.llms.opaqueprompts import OpaquePrompts
|
||||
from langchain_community.llms.opaqueprompts import OpaquePrompts
|
||||
|
||||
return OpaquePrompts
|
||||
|
||||
|
||||
def _import_azure_openai() -> Any:
|
||||
from langchain_openai.llm import AzureOpenAI
|
||||
|
||||
return AzureOpenAI
|
||||
|
||||
|
||||
def _import_openai() -> Any:
|
||||
from langchain_openai.llm import OpenAI
|
||||
|
||||
return OpenAI
|
||||
|
||||
|
||||
def _import_openai_chat() -> Any:
|
||||
from langchain_openai.llm import OpenAIChat
|
||||
|
||||
return OpenAIChat
|
||||
|
||||
|
||||
def _import_openllm() -> Any:
|
||||
from langchain_integrations.llms.openllm import OpenLLM
|
||||
from langchain_community.llms.openllm import OpenLLM
|
||||
|
||||
return OpenLLM
|
||||
|
||||
|
||||
def _import_openlm() -> Any:
|
||||
from langchain_integrations.llms.openlm import OpenLM
|
||||
from langchain_community.llms.openlm import OpenLM
|
||||
|
||||
return OpenLM
|
||||
|
||||
|
||||
def _import_pai_eas_endpoint() -> Any:
|
||||
from langchain_integrations.llms.pai_eas_endpoint import PaiEasEndpoint
|
||||
from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
|
||||
|
||||
return PaiEasEndpoint
|
||||
|
||||
|
||||
def _import_petals() -> Any:
|
||||
from langchain_integrations.llms.petals import Petals
|
||||
from langchain_community.llms.petals import Petals
|
||||
|
||||
return Petals
|
||||
|
||||
|
||||
def _import_pipelineai() -> Any:
|
||||
from langchain_integrations.llms.pipelineai import PipelineAI
|
||||
from langchain_community.llms.pipelineai import PipelineAI
|
||||
|
||||
return PipelineAI
|
||||
|
||||
|
||||
def _import_predibase() -> Any:
|
||||
from langchain_integrations.llms.predibase import Predibase
|
||||
from langchain_community.llms.predibase import Predibase
|
||||
|
||||
return Predibase
|
||||
|
||||
|
||||
def _import_predictionguard() -> Any:
|
||||
from langchain_integrations.llms.predictionguard import PredictionGuard
|
||||
from langchain_community.llms.predictionguard import PredictionGuard
|
||||
|
||||
return PredictionGuard
|
||||
|
||||
|
||||
def _import_promptlayer() -> Any:
|
||||
from langchain_integrations.llms.promptlayer_openai import PromptLayerOpenAI
|
||||
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
|
||||
|
||||
return PromptLayerOpenAI
|
||||
|
||||
|
||||
def _import_promptlayer_chat() -> Any:
|
||||
from langchain_integrations.llms.promptlayer_openai import PromptLayerOpenAIChat
|
||||
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat
|
||||
|
||||
return PromptLayerOpenAIChat
|
||||
|
||||
|
||||
def _import_replicate() -> Any:
|
||||
from langchain_integrations.llms.replicate import Replicate
|
||||
from langchain_community.llms.replicate import Replicate
|
||||
|
||||
return Replicate
|
||||
|
||||
|
||||
def _import_rwkv() -> Any:
|
||||
from langchain_integrations.llms.rwkv import RWKV
|
||||
from langchain_community.llms.rwkv import RWKV
|
||||
|
||||
return RWKV
|
||||
|
||||
|
||||
def _import_sagemaker_endpoint() -> Any:
|
||||
from langchain_integrations.llms.sagemaker_endpoint import SagemakerEndpoint
|
||||
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
|
||||
|
||||
return SagemakerEndpoint
|
||||
|
||||
|
||||
def _import_self_hosted() -> Any:
|
||||
from langchain_integrations.llms.self_hosted import SelfHostedPipeline
|
||||
from langchain_community.llms.self_hosted import SelfHostedPipeline
|
||||
|
||||
return SelfHostedPipeline
|
||||
|
||||
|
||||
def _import_self_hosted_hugging_face() -> Any:
|
||||
from langchain_integrations.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
|
||||
from langchain_community.llms.self_hosted_hugging_face import (
|
||||
SelfHostedHuggingFaceLLM,
|
||||
)
|
||||
|
||||
return SelfHostedHuggingFaceLLM
|
||||
|
||||
|
||||
def _import_stochasticai() -> Any:
|
||||
from langchain_integrations.llms.stochasticai import StochasticAI
|
||||
from langchain_community.llms.stochasticai import StochasticAI
|
||||
|
||||
return StochasticAI
|
||||
|
||||
|
||||
def _import_symblai_nebula() -> Any:
|
||||
from langchain_integrations.llms.symblai_nebula import Nebula
|
||||
from langchain_community.llms.symblai_nebula import Nebula
|
||||
|
||||
return Nebula
|
||||
|
||||
|
||||
def _import_textgen() -> Any:
|
||||
from langchain_integrations.llms.textgen import TextGen
|
||||
from langchain_community.llms.textgen import TextGen
|
||||
|
||||
return TextGen
|
||||
|
||||
|
||||
def _import_titan_takeoff() -> Any:
|
||||
from langchain_integrations.llms.titan_takeoff import TitanTakeoff
|
||||
from langchain_community.llms.titan_takeoff import TitanTakeoff
|
||||
|
||||
return TitanTakeoff
|
||||
|
||||
|
||||
def _import_titan_takeoff_pro() -> Any:
|
||||
from langchain_integrations.llms.titan_takeoff_pro import TitanTakeoffPro
|
||||
from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro
|
||||
|
||||
return TitanTakeoffPro
|
||||
|
||||
|
||||
def _import_together() -> Any:
|
||||
from langchain_integrations.llms.together import Together
|
||||
from langchain_community.llms.together import Together
|
||||
|
||||
return Together
|
||||
|
||||
|
||||
def _import_tongyi() -> Any:
|
||||
from langchain_integrations.llms.tongyi import Tongyi
|
||||
from langchain_community.llms.tongyi import Tongyi
|
||||
|
||||
return Tongyi
|
||||
|
||||
|
||||
def _import_vertex() -> Any:
|
||||
from langchain_integrations.llms.vertexai import VertexAI
|
||||
from langchain_community.llms.vertexai import VertexAI
|
||||
|
||||
return VertexAI
|
||||
|
||||
|
||||
def _import_vertex_model_garden() -> Any:
|
||||
from langchain_integrations.llms.vertexai import VertexAIModelGarden
|
||||
from langchain_community.llms.vertexai import VertexAIModelGarden
|
||||
|
||||
return VertexAIModelGarden
|
||||
|
||||
|
||||
def _import_vllm() -> Any:
|
||||
from langchain_integrations.llms.vllm import VLLM
|
||||
from langchain_community.llms.vllm import VLLM
|
||||
|
||||
return VLLM
|
||||
|
||||
|
||||
def _import_vllm_openai() -> Any:
|
||||
from langchain_integrations.llms.vllm import VLLMOpenAI
|
||||
from langchain_community.llms.vllm import VLLMOpenAI
|
||||
|
||||
return VLLMOpenAI
|
||||
|
||||
|
||||
def _import_watsonxllm() -> Any:
|
||||
from langchain_community.llms.watsonxllm import WatsonxLLM
|
||||
|
||||
return WatsonxLLM
|
||||
|
||||
|
||||
def _import_writer() -> Any:
|
||||
from langchain_integrations.llms.writer import Writer
|
||||
from langchain_community.llms.writer import Writer
|
||||
|
||||
return Writer
|
||||
|
||||
|
||||
def _import_xinference() -> Any:
|
||||
from langchain_integrations.llms.xinference import Xinference
|
||||
from langchain_community.llms.xinference import Xinference
|
||||
|
||||
return Xinference
|
||||
|
||||
|
||||
def _import_yandex_gpt() -> Any:
|
||||
from langchain_integrations.llms.yandex import YandexGPT
|
||||
from langchain_community.llms.yandex import YandexGPT
|
||||
|
||||
return YandexGPT
|
||||
|
||||
|
||||
def _import_volcengine_maas() -> Any:
|
||||
from langchain_integrations.llms.volcengine_maas import VolcEngineMaasLLM
|
||||
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
|
||||
|
||||
return VolcEngineMaasLLM
|
||||
|
||||
@@ -629,12 +619,6 @@ def __getattr__(name: str) -> Any:
|
||||
return _import_ollama()
|
||||
elif name == "OpaquePrompts":
|
||||
return _import_opaqueprompts()
|
||||
elif name == "AzureOpenAI":
|
||||
return _import_azure_openai()
|
||||
elif name == "OpenAI":
|
||||
return _import_openai()
|
||||
elif name == "OpenAIChat":
|
||||
return _import_openai_chat()
|
||||
elif name == "OpenLLM":
|
||||
return _import_openllm()
|
||||
elif name == "OpenLM":
|
||||
@@ -685,6 +669,8 @@ def __getattr__(name: str) -> Any:
|
||||
return _import_vllm()
|
||||
elif name == "VLLMOpenAI":
|
||||
return _import_vllm_openai()
|
||||
elif name == "WatsonxLLM":
|
||||
return _import_watsonxllm()
|
||||
elif name == "Writer":
|
||||
return _import_writer()
|
||||
elif name == "Xinference":
|
||||
@@ -712,7 +698,6 @@ __all__ = [
|
||||
"Arcee",
|
||||
"Aviary",
|
||||
"AzureMLOnlineEndpoint",
|
||||
"AzureOpenAI",
|
||||
"Banana",
|
||||
"Baseten",
|
||||
"Beam",
|
||||
@@ -752,8 +737,6 @@ __all__ = [
|
||||
"NIBittensorLLM",
|
||||
"NLPCloud",
|
||||
"Ollama",
|
||||
"OpenAI",
|
||||
"OpenAIChat",
|
||||
"OpenLLM",
|
||||
"OpenLM",
|
||||
"PaiEasEndpoint",
|
||||
@@ -777,6 +760,7 @@ __all__ = [
|
||||
"VertexAIModelGarden",
|
||||
"VLLM",
|
||||
"VLLMOpenAI",
|
||||
"WatsonxLLM",
|
||||
"Writer",
|
||||
"OctoAIEndpoint",
|
||||
"Xinference",
|
||||
@@ -797,7 +781,6 @@ def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
|
||||
"anyscale": _import_anyscale,
|
||||
"arcee": _import_arcee,
|
||||
"aviary": _import_aviary,
|
||||
"azure": _import_azure_openai,
|
||||
"azureml_endpoint": _import_azureml_endpoint,
|
||||
"bananadev": _import_bananadev,
|
||||
"baseten": _import_baseten,
|
||||
@@ -838,7 +821,6 @@ def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
|
||||
"nibittensor": _import_bittensor,
|
||||
"nlpcloud": _import_nlpcloud,
|
||||
"ollama": _import_ollama,
|
||||
"openai": _import_openai,
|
||||
"openlm": _import_openlm,
|
||||
"pai_eas_endpoint": _import_pai_eas_endpoint,
|
||||
"petals": _import_petals,
|
||||
@@ -861,10 +843,11 @@ def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
|
||||
"openllm_client": _import_openllm,
|
||||
"vllm": _import_vllm,
|
||||
"vllm_openai": _import_vllm_openai,
|
||||
"watsonxllm": _import_watsonxllm,
|
||||
"writer": _import_writer,
|
||||
"xinference": _import_xinference,
|
||||
"javelin-ai-gateway": _import_javelin_ai_gateway,
|
||||
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
|
||||
"yandex_gpt": _import_yandex_gpt,
|
||||
"VolcEngineMaasLLM": _import_volcengine_maas(),
|
||||
"VolcEngineMaasLLM": _import_volcengine_maas,
|
||||
}
|
||||
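One behavioral fix rides along with the renames above: the `VolcEngineMaasLLM` entry used to *call* its import function instead of referencing it. A minimal sketch of why the registry must hold callables, reusing names from this module (illustrative, not part of the diff):

```python
# get_type_to_cls_dict is typed Dict[str, Callable[[], Type[BaseLLM]]]:
# every value is a zero-argument factory, invoked only when needed.
type_to_cls = get_type_to_cls_dict()
llm_cls = type_to_cls["vllm"]()  # the integration module is imported only here

# Writing `_import_volcengine_maas()` in the dict literal (note the call
# parentheses) imports the integration eagerly while the dict is being built
# and stores a class object where every other entry stores a factory.
```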
@@ -11,23 +11,22 @@ from typing import (
    Optional,
)

from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.prompt_values import PromptValue
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import (
    check_package_version,
    get_from_dict_or_env,
    get_pydantic_field_names,
)
from langchain_core.utils.utils import build_extra_kwargs, convert_to_secret_str

from langchain_core.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_integrations.llms.base import LLM
from langchain_core.utils import get_from_dict_or_env


class _AnthropicCommon(BaseLanguageModel):
    client: Any = None  #: :meta private:

@@ -154,7 +153,7 @@ class Anthropic(LLM, _AnthropicCommon):
        .. code-block:: python

            import anthropic
            from langchain_integrations.llms import Anthropic
            from langchain_community.llms import Anthropic

            model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")

@@ -180,7 +179,8 @@ class Anthropic(LLM, _AnthropicCommon):
        """Raise warning that this class is deprecated."""
        warnings.warn(
            "This Anthropic LLM is deprecated. "
            "Please use `from langchain_integrations.chat_models import ChatAnthropic` instead"
            "Please use `from langchain_community.chat_models import ChatAnthropic` "
            "instead"
        )
        return values
@@ -0,0 +1,126 @@
import json
import logging
from typing import Any, Dict, Iterator, List, Optional

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk

logger = logging.getLogger(__name__)


class CloudflareWorkersAI(LLM):
    """LangChain LLM class providing access to the Cloudflare Workers AI service.

    To use, you must provide an API token and
    account ID to access Cloudflare Workers AI, and
    pass them as named parameters to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI

            my_account_id = "my_account_id"
            my_api_token = "my_secret_api_token"
            llm_model = "@cf/meta/llama-2-7b-chat-int8"

            cf_ai = CloudflareWorkersAI(
                account_id=my_account_id,
                api_token=my_api_token,
                model=llm_model
            )
    """  # noqa: E501

    account_id: str
    api_token: str
    model: str = "@cf/meta/llama-2-7b-chat-int8"
    base_url: str = "https://api.cloudflare.com/client/v4/accounts"
    streaming: bool = False
    endpoint_url: str = ""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the Cloudflare Workers AI class."""
        super().__init__(**kwargs)

        self.endpoint_url = f"{self.base_url}/{self.account_id}/ai/run/{self.model}"

    @property
    def _llm_type(self) -> str:
        """Return type of LLM."""
        return "cloudflare"

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Default parameters"""
        return {}

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Identifying parameters"""
        return {
            "account_id": self.account_id,
            "api_token": self.api_token,
            "model": self.model,
            "base_url": self.base_url,
        }

    def _call_api(self, prompt: str, params: Dict[str, Any]) -> requests.Response:
        """Call the Cloudflare Workers API."""
        headers = {"Authorization": f"Bearer {self.api_token}"}
        data = {"prompt": prompt, "stream": self.streaming, **params}
        response = requests.post(self.endpoint_url, headers=headers, json=data)
        return response

    def _process_response(self, response: requests.Response) -> str:
        """Process API response"""
        if response.ok:
            data = response.json()
            return data["result"]["response"]
        else:
            raise ValueError(f"Request failed with status {response.status_code}")

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Streaming prediction"""
        original_streaming: bool = self.streaming
        self.streaming = True
        _response_prefix_count = len("data: ")
        _response_stream_end = b"data: [DONE]"
        for chunk in self._call_api(prompt, kwargs).iter_lines():
            if chunk == _response_stream_end:
                break
            if len(chunk) > _response_prefix_count:
                try:
                    data = json.loads(chunk[_response_prefix_count:])
                except Exception as e:
                    logger.debug(chunk)
                    raise e
                if data is not None and "response" in data:
                    yield GenerationChunk(text=data["response"])
                    if run_manager:
                        run_manager.on_llm_new_token(data["response"])
        logger.debug("stream end")
        self.streaming = original_streaming

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Regular prediction"""
        if self.streaming:
            return "".join(
                [c.text for c in self._stream(prompt, stop, run_manager, **kwargs)]
            )
        else:
            response = self._call_api(prompt, kwargs)
            return self._process_response(response)
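A brief usage sketch for the new `CloudflareWorkersAI` class, mirroring its docstring example; the account id, token, and prompt are placeholders. `invoke` exercises `_call`, while `stream` drives `_stream` one chunk at a time:

```python
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI

llm = CloudflareWorkersAI(
    account_id="my_account_id",        # placeholder
    api_token="my_secret_api_token",   # placeholder
    model="@cf/meta/llama-2-7b-chat-int8",
)

print(llm.invoke("Tell me a joke"))  # one-shot completion via _call

for chunk in llm.stream("Tell me a joke"):  # incremental text via _stream
    print(chunk, end="", flush=True)
```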
@@ -1,11 +1,10 @@
from typing import Optional, Type

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_core.callbacks.manager import CallbackManagerForToolRun
from langchain_integrations.chains import LLMChain
from langchain_integrations.chat_models import ChatOpenAI
from langchain_integrations.tools.amadeus.base import AmadeusBaseTool
from langchain_openai.chat_models import ChatOpenAI
from langchain_community.tools.amadeus.base import AmadeusBaseTool


class ClosestAirportSchema(BaseModel):

@@ -41,17 +40,11 @@ class AmadeusClosestAirport(AmadeusBaseTool):
        location: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        template = (
            " What is the nearest airport to {location}? Please respond with the "
        content = (
            f" What is the nearest airport to {location}? Please respond with the "
            " airport's International Air Transport Association (IATA) Location "
            ' Identifier in the following JSON format. JSON: "iataCode": "IATA '
            ' Location Identifier" '
        )

        llm = ChatOpenAI(temperature=0)

        llm_chain = LLMChain.from_string(llm=llm, template=template)

        output = llm_chain.run(location=location)

        return output
        return ChatOpenAI(temperature=0).invoke(content)
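Note that the migrated call returns a message object, not a plain string: `ChatOpenAI.invoke` yields an `AIMessage`, so a caller that needs the `str` declared by the tool's signature would read its `.content`. A hedged sketch (not part of the diff):

```python
from langchain_openai.chat_models import ChatOpenAI

msg = ChatOpenAI(temperature=0).invoke("What is the nearest airport to Paris?")
text = msg.content  # the textual payload; invoke() itself returns an AIMessage
```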
@@ -9,30 +9,20 @@ To use this tool, you must first set as environment variables:
Below is a sample script that uses the Clickup tool:

```python
from langchain_integrations.agents import AgentType
from langchain_integrations.agents import initialize_agent
from langchain_integrations.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_integrations.llms import OpenAI
from langchain_integrations.utilities.clickup import ClickupAPIWrapper
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper

llm = OpenAI(temperature=0)
clickup = ClickupAPIWrapper()
toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup)
agent = initialize_agent(
    toolkit.get_tools(),
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)
```
"""
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field

from langchain_core.callbacks.manager import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_integrations.utilities.clickup import ClickupAPIWrapper

from langchain_community.utilities.clickup import ClickupAPIWrapper


class ClickupAction(BaseTool):
@@ -11,30 +11,20 @@ To use this tool, you must first set as environment variables:
Below is a sample script that uses the Jira tool:

```python
from langchain_integrations.agents import AgentType
from langchain_integrations.agents import initialize_agent
from langchain_integrations.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_integrations.llms import OpenAI
from langchain_integrations.utilities.jira import JiraAPIWrapper
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.utilities.jira import JiraAPIWrapper

llm = OpenAI(temperature=0)
jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
agent = initialize_agent(
    toolkit.get_tools(),
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)
```
"""
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.pydantic_v1 import Field

from langchain_core.callbacks.manager import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_integrations.utilities.jira import JiraAPIWrapper

from langchain_community.utilities.jira import JiraAPIWrapper


class JiraAction(BaseTool):
@@ -3,21 +3,20 @@ import logging
from time import perf_counter
from typing import Any, Dict, Optional, Tuple

from langchain_core.pydantic_v1 import Field, validator

from langchain_core.callbacks.manager import (
from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_integrations.chains.llm import LLMChain
from langchain_openai.chat_model import _import_tiktoken
from langchain_core.pydantic_v1 import Field, validator
from langchain_core.tools import BaseTool
from langchain_integrations.tools.powerbi.prompt import (
from langchain_openai.chat_models import _import_tiktoken

from langchain_community.tools.powerbi.prompt import (
    BAD_REQUEST_RESPONSE,
    DEFAULT_FEWSHOT_EXAMPLES,
    RETRY_RESPONSE,
)
from langchain_integrations.utilities.powerbi import PowerBIDataset, json_to_md
from langchain_community.utilities.powerbi import PowerBIDataset, json_to_md

logger = logging.getLogger(__name__)

@@ -31,7 +30,7 @@ class QueryPowerBITool(BaseTool):

    Example Input: "How many rows are in table1?"
    """  # noqa: E501
    llm_chain: LLMChain
    llm_chain: Any
    powerbi: PowerBIDataset = Field(exclude=True)
    examples: Optional[str] = DEFAULT_FEWSHOT_EXAMPLES
    session_cache: Dict[str, Any] = Field(default_factory=dict, exclude=True)

@@ -46,8 +45,8 @@ class QueryPowerBITool(BaseTool):

    @validator("llm_chain")
    def validate_llm_chain_input_variables(  # pylint: disable=E0213
        cls, llm_chain: LLMChain
    ) -> LLMChain:
        cls, llm_chain: Any
    ) -> Any:
        """Make sure the LLM chain has the correct input variables."""
        for var in llm_chain.prompt.input_variables:
            if var not in ["tool_input", "tables", "schemas", "examples"]:
@@ -5,15 +5,14 @@ from typing import Any, Dict, Optional
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator

from langchain_core.language_models import BaseLanguageModel
from langchain_core.callbacks.manager import (
from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_integrations.chains.llm import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_integrations.utilities.spark_sql import SparkSQL
from langchain_community.utilities.spark_sql import SparkSQL
from langchain_core.tools import BaseTool
from langchain_integrations.tools.spark_sql.prompt import QUERY_CHECKER
from langchain_community.tools.spark_sql.prompt import QUERY_CHECKER


class BaseSparkSQLTool(BaseModel):

@@ -85,7 +84,7 @@ class QueryCheckerTool(BaseSparkSQLTool, BaseTool):

    template: str = QUERY_CHECKER
    llm: BaseLanguageModel
    llm_chain: LLMChain = Field(init=False)
    llm_chain: Any = Field(init=False)
    name: str = "query_checker_sql_db"
    description: str = """
    Use this tool to double check if your query is correct before executing it.

@@ -95,6 +94,7 @@ class QueryCheckerTool(BaseSparkSQLTool, BaseTool):
    @root_validator(pre=True)
    def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        if "llm_chain" not in values:
            from langchain.chains.llm import LLMChain
            values["llm_chain"] = LLMChain(
                llm=values.get("llm"),
                prompt=PromptTemplate(
@@ -5,15 +5,14 @@ from typing import Any, Dict, Optional
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator

from langchain_core.language_models import BaseLanguageModel
from langchain_core.callbacks.manager import (
from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_integrations.chains.llm import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_integrations.utilities.sql_database import SQLDatabase
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_core.tools import BaseTool
from langchain_integrations.tools.sql_database.prompt import QUERY_CHECKER
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER


class BaseSQLDatabaseTool(BaseModel):

@@ -60,7 +59,9 @@ class InfoSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get the schema for tables in a comma-separated list."""
        return self.db.get_table_info_no_throw(table_names.split(", "))
        return self.db.get_table_info_no_throw(
            [t.strip() for t in table_names.split(",")]
        )


class ListSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):

@@ -84,7 +85,7 @@ class QuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool):

    template: str = QUERY_CHECKER
    llm: BaseLanguageModel
    llm_chain: LLMChain = Field(init=False)
    llm_chain: Any = Field(init=False)
    name: str = "sql_db_query_checker"
    description: str = """
    Use this tool to double check if your query is correct before executing it.

@@ -94,6 +95,7 @@ class QuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool):
    @root_validator(pre=True)
    def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        if "llm_chain" not in values:
            from langchain.chains.llm import LLMChain
            values["llm_chain"] = LLMChain(
                llm=values.get("llm"),
                prompt=PromptTemplate(
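The `InfoSQLDatabaseTool` hunk above is behavioral, not cosmetic: splitting on a bare comma and stripping each name tolerates input with or without spaces after the commas. A small self-contained comparison of the two parsers:

```python
table_names = "users, orders,invoices"

# New parsing: robust to missing or extra whitespace around commas.
assert [t.strip() for t in table_names.split(",")] == ["users", "orders", "invoices"]

# Old parsing: split(", ") silently glues together names lacking the space.
assert table_names.split(", ") == ["users", "orders,invoices"]
```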
@@ -47,10 +47,8 @@ os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "")
# get from https://nla.zapier.com/docs/authentication/
os.environ["ZAPIER_NLA_API_KEY"] = os.environ.get("ZAPIER_NLA_API_KEY", "")

from langchain_integrations.llms import OpenAI
from langchain_integrations.agents import initialize_agent
from langchain_integrations.agent_toolkits import ZapierToolkit
from langchain_integrations.utilities.zapier import ZapierNLAWrapper
from langchain_community.agent_toolkits import ZapierToolkit
from langchain_community.utilities.zapier import ZapierNLAWrapper

## step 0. expose gmail 'find email' and slack 'send channel message' actions

@@ -60,37 +58,27 @@ from langchain_integrations.utilities.zapier import ZapierNLAWrapper
# in an oauth scenario, you'd get your own <provider> id (instead of 'demo')
# which you route your users through first

llm = OpenAI(temperature=0)
zapier = ZapierNLAWrapper()
## To leverage OAuth you may pass the value `nla_oauth_access_token` to
## the ZapierNLAWrapper. If you do this there is no need to initialize
## the ZAPIER_NLA_API_KEY env variable
# zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token="TOKEN_HERE")
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
agent = initialize_agent(
    toolkit.get_tools(),
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)

agent.run(("Summarize the last email I received regarding Silicon Valley Bank. "
           "Send the summary to the #test-zapier channel in slack."))
```

"""
from typing import Any, Dict, Optional

from langchain_core._api import warn_deprecated
from langchain_core.pydantic_v1 import Field, root_validator

from langchain_core.callbacks.manager import (
from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.tools import BaseTool
from langchain_integrations.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT
from langchain_integrations.utilities.zapier import ZapierNLAWrapper

from langchain_community.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT
from langchain_community.utilities.zapier import ZapierNLAWrapper


class ZapierNLARunAction(BaseTool):
@@ -3,20 +3,12 @@ import asyncio
import os

from aiohttp import ClientSession
from langchain_core.callbacks.manager import atrace_as_chain_group, trace_as_chain_group
from langchain_core.tracers.context import tracing_v2_enabled, tracing_enabled
from langchain_core.prompts import PromptTemplate

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import tracing_enabled
from langchain.callbacks.manager import (
    atrace_as_chain_group,
    trace_as_chain_group,
    tracing_v2_enabled,
)
from langchain.chains import LLMChain
from langchain.chains.constitutional_ai.base import ConstitutionalChain
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain_openai.chat_models import ChatOpenAI
from langchain_openai.llms import OpenAI

questions = [
    (

@@ -40,6 +32,7 @@ questions = [


def test_tracing_sequential() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_TRACING"] = "true"

    for q in questions[:3]:

@@ -52,6 +45,7 @@ def test_tracing_sequential() -> None:


def test_tracing_session_env_var() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_TRACING"] = "true"
    os.environ["LANGCHAIN_SESSION"] = "my_session"

@@ -66,6 +60,7 @@ def test_tracing_session_env_var() -> None:


async def test_tracing_concurrent() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_TRACING"] = "true"
    aiosession = ClientSession()
    llm = OpenAI(temperature=0)

@@ -79,6 +74,7 @@ async def test_tracing_concurrent() -> None:


async def test_tracing_concurrent_bw_compat_environ() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_HANDLER"] = "langchain"
    if "LANGCHAIN_TRACING" in os.environ:
        del os.environ["LANGCHAIN_TRACING"]

@@ -96,6 +92,7 @@ async def test_tracing_concurrent_bw_compat_environ() -> None:


def test_tracing_context_manager() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    llm = OpenAI(temperature=0)
    tools = load_tools(["llm-math", "serpapi"], llm=llm)
    agent = initialize_agent(

@@ -111,6 +108,7 @@ def test_tracing_context_manager() -> None:


async def test_tracing_context_manager_async() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    llm = OpenAI(temperature=0)
    async_tools = load_tools(["llm-math", "serpapi"], llm=llm)
    agent = initialize_agent(

@@ -130,6 +128,7 @@ async def test_tracing_context_manager_async() -> None:


async def test_tracing_v2_environment_variable() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_TRACING_V2"] = "true"

    aiosession = ClientSession()

@@ -144,6 +143,7 @@ async def test_tracing_v2_environment_variable() -> None:


def test_tracing_v2_context_manager() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    llm = ChatOpenAI(temperature=0)
    tools = load_tools(["llm-math", "serpapi"], llm=llm)
    agent = initialize_agent(

@@ -158,6 +158,9 @@ def test_tracing_v2_context_manager() -> None:


def test_tracing_v2_chain_with_tags() -> None:
    from langchain.chains.llm import LLMChain
    from langchain.chains.constitutional_ai.base import ConstitutionalChain
    from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
    llm = OpenAI(temperature=0)
    chain = ConstitutionalChain.from_llm(
        llm,

@@ -177,6 +180,7 @@ def test_tracing_v2_chain_with_tags() -> None:


def test_tracing_v2_agent_with_metadata() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    llm = OpenAI(temperature=0)
    chat = ChatOpenAI(temperature=0)

@@ -192,6 +196,7 @@ def test_tracing_v2_agent_with_metadata() -> None:


async def test_tracing_v2_async_agent_with_metadata() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    llm = OpenAI(temperature=0, metadata={"f": "g", "h": "i"})
    chat = ChatOpenAI(temperature=0, metadata={"f": "g", "h": "i"})

@@ -210,6 +215,7 @@ async def test_tracing_v2_async_agent_with_metadata() -> None:


def test_trace_as_group() -> None:
    from langchain.chains.llm import LLMChain
    llm = OpenAI(temperature=0.9)
    prompt = PromptTemplate(
        input_variables=["product"],

@@ -228,6 +234,7 @@ def test_trace_as_group() -> None:


def test_trace_as_group_with_env_set() -> None:
    from langchain.chains.llm import LLMChain
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    llm = OpenAI(temperature=0.9)
    prompt = PromptTemplate(

@@ -251,6 +258,7 @@ def test_trace_as_group_with_env_set() -> None:


async def test_trace_as_group_async() -> None:
    from langchain.chains.llm import LLMChain
    llm = OpenAI(temperature=0.9)
    prompt = PromptTemplate(
        input_variables=["product"],
@@ -1,9 +1,9 @@
"""Integration tests for the langchain tracer module."""
import asyncio

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

from langchain_community.callbacks import get_openai_callback
from langchain_openai.llms import OpenAI


async def test_openai_callback() -> None:

@@ -51,6 +51,7 @@ def test_openai_callback_batch_llm() -> None:


def test_openai_callback_agent() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    llm = OpenAI(temperature=0)
    tools = load_tools(["serpapi", "llm-math"], llm=llm)
    agent = initialize_agent(
@@ -2,19 +2,18 @@

import pytest

from langchain.agents import AgentType, initialize_agent, load_tools

# Import the internal StreamlitCallbackHandler from its module - and not from
# the `langchain.callbacks.streamlit` package - so that we don't end up using
# the `langchain_community.callbacks.streamlit` package - so that we don't end up using
# Streamlit's externally-provided callback handler.
from langchain.callbacks.streamlit.streamlit_callback_handler import (
from langchain_community.callbacks.streamlit.streamlit_callback_handler import (
    StreamlitCallbackHandler,
)
from langchain.llms import OpenAI
from langchain_openai.llms import OpenAI


@pytest.mark.requires("streamlit")
def test_streamlit_callback_agent() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    import streamlit as st

    streamlit_callback = StreamlitCallbackHandler(st.container())
@@ -3,10 +3,9 @@ import asyncio
import os

from aiohttp import ClientSession
from langchain_community.callbacks import wandb_tracing_enabled

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks.manager import wandb_tracing_enabled
from langchain.llms import OpenAI
from langchain_openai.llms import OpenAI

questions = [
    (

@@ -30,6 +29,7 @@ questions = [


def test_tracing_sequential() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
    os.environ["WANDB_PROJECT"] = "langchain-tracing"

@@ -46,6 +46,7 @@ def test_tracing_sequential() -> None:


def test_tracing_session_env_var() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_WANDB_TRACING"] = "true"

    llm = OpenAI(temperature=0)

@@ -60,6 +61,7 @@ def test_tracing_session_env_var() -> None:


async def test_tracing_concurrent() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
    aiosession = ClientSession()
    llm = OpenAI(temperature=0)

@@ -77,6 +79,7 @@ async def test_tracing_concurrent() -> None:


def test_tracing_context_manager() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    llm = OpenAI(temperature=0)
    tools = load_tools(
        ["llm-math", "serpapi"],

@@ -94,6 +97,7 @@ def test_tracing_context_manager() -> None:


async def test_tracing_context_manager_async() -> None:
    from langchain.agents import AgentType, initialize_agent, load_tools
    llm = OpenAI(temperature=0)
    async_tools = load_tools(
        ["llm-math", "serpapi"],
@@ -2,6 +2,7 @@

from typing import Any

from langchain_core.callbacks import CallbackManager
from langchain_core.messages import (
    AIMessage,
    BaseMessage,

@@ -11,11 +12,7 @@ from langchain_core.messages import (
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate

from langchain.callbacks.manager import CallbackManager
from langchain.chains.openai_functions import (
    create_openai_fn_chain,
)
from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler

_FUNCTIONS: Any = [

@@ -185,18 +182,12 @@ def test_functions_call_thoughts() -> None:
    ]
    prompt = ChatPromptTemplate(messages=prompt_msgs)

    chain = create_openai_fn_chain(
        _FUNCTIONS,
        chat,
        prompt,
        output_parser=None,
    )
    chain = prompt | chat.bind(functions=_FUNCTIONS)

    message = HumanMessage(content="What's the temperature in Shanghai today?")
    response = chain.generate([{"input": message}])
    assert isinstance(response.generations[0][0], ChatGeneration)
    assert isinstance(response.generations[0][0].message, AIMessage)
    assert "function_call" in response.generations[0][0].message.additional_kwargs
    response = chain.batch([{"input": message}])
    assert isinstance(response[0], AIMessage)
    assert "function_call" in response[0].additional_kwargs


def test_functions_call() -> None:

@@ -223,11 +214,6 @@ def test_functions_call() -> None:
            ),
        ]
    )
    llm_chain = create_openai_fn_chain(
        _FUNCTIONS,
        chat,
        prompt,
        output_parser=None,
    )
    resp = llm_chain.generate([{}])
    assert isinstance(resp, LLMResult)
    chain = prompt | chat.bind(functions=_FUNCTIONS)
    resp = chain.invoke({})
    assert isinstance(resp, AIMessage)
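The test migration above swaps `create_openai_fn_chain` for LangChain Expression Language composition. A minimal sketch of the pattern, reusing `prompt`, `chat`, and `_FUNCTIONS` from the test (illustrative, not part of the diff):

```python
# `bind` fixes call-time kwargs on the model; `|` pipes the prompt into it.
chain = prompt | chat.bind(functions=_FUNCTIONS)

# The composed Runnable exposes invoke/batch/stream instead of generate/run.
msg = chain.invoke({"input": "What's the temperature in Shanghai today?"})
# For function-calling models, the call shows up in additional_kwargs:
print(msg.additional_kwargs.get("function_call"))
```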
@@ -2,10 +2,9 @@ from pathlib import Path

import pytest

from langchain.document_loaders.concurrent import ConcurrentLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import LanguageParser
from langchain.text_splitter import Language
from langchain_community.document_loaders.concurrent import ConcurrentLoader
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import LanguageParser


def test_language_loader_for_python() -> None:

@@ -55,7 +54,7 @@ def test_language_loader_for_python_with_parser_threshold() -> None:
    loader = GenericLoader.from_filesystem(
        file_path,
        glob="hello_world.py",
        parser=LanguageParser(language=Language.PYTHON, parser_threshold=1000),
        parser=LanguageParser(language="python", parser_threshold=1000),
    )
    docs = loader.load()

@@ -127,7 +126,7 @@ def test_language_loader_for_javascript_with_parser_threshold() -> None:
    loader = GenericLoader.from_filesystem(
        file_path,
        glob="hello_world.js",
        parser=LanguageParser(language=Language.JS, parser_threshold=1000),
        parser=LanguageParser(language="js", parser_threshold=1000),
    )
    docs = loader.load()

@@ -140,7 +139,7 @@ def test_concurrent_language_loader_for_javascript_with_parser_threshold() -> No
    loader = ConcurrentLoader.from_filesystem(
        file_path,
        glob="hello_world.js",
        parser=LanguageParser(language=Language.JS, parser_threshold=1000),
        parser=LanguageParser(language="js", parser_threshold=1000),
    )
    docs = loader.load()

@@ -153,7 +152,7 @@ def test_concurrent_language_loader_for_python_with_parser_threshold() -> None:
    loader = ConcurrentLoader.from_filesystem(
        file_path,
        glob="hello_world.py",
        parser=LanguageParser(language=Language.PYTHON, parser_threshold=1000),
        parser=LanguageParser(language="python", parser_threshold=1000),
    )
    docs = loader.load()
@@ -4,14 +4,8 @@ from typing import Generator

import pytest
from langchain_core.outputs import LLMResult
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)

from langchain.chains import LLMChain
from langchain.llms.fireworks import Fireworks
from langchain_community.llms.fireworks import Fireworks

if sys.version_info < (3, 9):
    pytest.skip("fireworks-ai requires Python > 3.8", allow_module_level=True)

@@ -29,22 +23,6 @@ def test_fireworks_call(llm: Fireworks) -> None:
    assert isinstance(output, str)


@pytest.mark.scheduled
def test_fireworks_in_chain() -> None:
    """Tests fireworks AI in a Langchain chain"""
    human_message_prompt = HumanMessagePromptTemplate(
        prompt=PromptTemplate(
            template="What is a good name for a company that makes {product}?",
            input_variables=["product"],
        )
    )
    chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
    chat = Fireworks()
    chain = LLMChain(llm=chat, prompt=chat_prompt_template)
    output = chain.run("football helmets")
    assert isinstance(output, str)


@pytest.mark.scheduled
def test_fireworks_model_param() -> None:
    """Tests model parameters for Fireworks"""
@@ -1,12 +1,10 @@
import langchain_community.utilities.opaqueprompts as op
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel

import langchain.utilities.opaqueprompts as op
from langchain.chains.llm import LLMChain
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain_openai.llms import OpenAI
from langchain_community.llms.opaqueprompts import OpaquePrompts

prompt_template = """
As an AI assistant, you will answer questions according to given context.

@@ -45,13 +43,8 @@ Question: ```{question}```


def test_opaqueprompts() -> None:
    chain = LLMChain(
        prompt=PromptTemplate.from_template(prompt_template),
        llm=OpaquePrompts(llm=OpenAI()),
        memory=ConversationBufferWindowMemory(k=2),
    )

    output = chain.run(
    chain = PromptTemplate.from_template(prompt_template) | OpaquePrompts(llm=OpenAI())
    output = chain.invoke(
        {
            "question": "Write a text message to remind John to do password reset \
                for his website through his email to stay secure."
@@ -1,8 +1,5 @@
"""Test Nebula API wrapper."""
from langchain_core.prompts.prompt import PromptTemplate

from langchain.chains.llm import LLMChain
from langchain.llms.symblai_nebula import Nebula
from langchain_community.llms.symblai_nebula import Nebula


def test_symblai_nebula_call() -> None:

@@ -41,7 +38,5 @@ Rhea: Thanks, bye!"""

    instruction = """Identify the main objectives mentioned in this
conversation."""
    prompt = PromptTemplate.from_template(template="{instruction}\n{conversation}")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    output = llm_chain.run(instruction=instruction, conversation=conversation)
    output = llm.invoke(f"{instruction}\n{conversation}")
    assert isinstance(output, str)
@@ -9,12 +9,9 @@ import os
from typing import Optional

import pytest
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
from pytest_mock import MockerFixture

from langchain.chains.summarize import load_summarize_chain
from langchain.llms import VertexAI, VertexAIModelGarden
from langchain_community.llms import VertexAI, VertexAIModelGarden


def test_vertex_initialization() -> None:

@@ -152,31 +149,3 @@ def test_vertex_call_count_tokens() -> None:
    llm = VertexAI()
    output = llm.get_num_tokens("How are you?")
    assert output == 4


@pytest.mark.requires("google.cloud.aiplatform")
def test_get_num_tokens_be_called_when_using_mapreduce_chain(
    mocker: MockerFixture,
) -> None:
    from vertexai.language_models._language_models import CountTokensResponse

    m1 = mocker.patch(
        "vertexai.preview.language_models._PreviewTextGenerationModel.count_tokens",
        return_value=CountTokensResponse(
            total_tokens=2,
            total_billable_characters=2,
            _count_tokens_response={"total_tokens": 2, "total_billable_characters": 2},
        ),
    )
    llm = VertexAI()
    chain = load_summarize_chain(
        llm,
        chain_type="map_reduce",
        return_intermediate_steps=False,
    )
    doc = Document(page_content="Hi")
    output = chain({"input_documents": [doc]})
    assert isinstance(output["output_text"], str)
    m1.assert_called_once()
    assert llm._llm_type == "vertexai"
    assert llm.model_name == llm.client._model_id
@@ -3,11 +3,10 @@ from typing import Any, List

import pytest
from langchain_core.documents import Document

from langchain.agents.load_tools import load_tools
from langchain.tools import ArxivQueryRun
from langchain_core.tools import BaseTool
from langchain.utilities import ArxivAPIWrapper

from langchain_community.tools import ArxivQueryRun
from langchain_community.utilities import ArxivAPIWrapper


@pytest.fixture

@@ -142,6 +141,7 @@ def test_load_returns_full_set_of_metadata() -> None:


def _load_arxiv_from_universal_entry(**kwargs: Any) -> BaseTool:
    from langchain.agents.load_tools import load_tools
    tools = load_tools(["arxiv"], **kwargs)
    assert len(tools) == 1, "loaded more than 1 tool"
    return tools[0]
@@ -3,11 +3,10 @@ from typing import Any, List

import pytest
from langchain_core.documents import Document

from langchain.agents.load_tools import load_tools
from langchain.tools import PubmedQueryRun
from langchain_core.tools import BaseTool
from langchain.utilities import PubMedAPIWrapper

from langchain_community.tools import PubmedQueryRun
from langchain_community.utilities import PubMedAPIWrapper

xmltodict = pytest.importorskip("xmltodict")

@@ -135,6 +134,7 @@ def test_load_returns_full_set_of_metadata() -> None:


def _load_pubmed_from_universal_entry(**kwargs: Any) -> BaseTool:
    from langchain.agents.load_tools import load_tools
    tools = load_tools(["pubmed"], **kwargs)
    assert len(tools) == 1, "loaded more than 1 tool"
    return tools[0]
@@ -1,14 +1,9 @@
import os
from typing import Generator, List, Union
from typing import Union

import pytest
from langchain_core.documents import Document
from vcr.request import Request

from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter

# These environment variables turn on the Deep Lake pytest mode.
# They make tests run significantly faster.
# They need to be set before `import deeplake`.
@@ -47,35 +42,3 @@ def vcr_config() -> dict:
        ],
        "ignore_localhost": True,
    }


# Define a fixture that yields a generator object returning a list of documents
@pytest.fixture(scope="function")
def documents() -> Generator[List[Document], None, None]:
    """Return a generator that yields a list of documents."""

    # Create a CharacterTextSplitter object for splitting the documents into chunks
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)

    # Load the documents from a file located in the fixtures directory
    documents = TextLoader(
        os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
    ).load()

    # Yield the documents split into chunks
    yield text_splitter.split_documents(documents)


@pytest.fixture(scope="function")
def texts() -> Generator[List[str], None, None]:
    # Load the documents from a file located in the fixtures directory
    documents = TextLoader(
        os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
    ).load()

    yield [doc.page_content for doc in documents]


@pytest.fixture(scope="module")
def embedding_openai() -> OpenAIEmbeddings:
    return OpenAIEmbeddings()
@@ -0,0 +1,85 @@
"""Test CallbackManager."""
from unittest.mock import patch

import pytest
from langchain_community.callbacks import get_openai_callback
from langchain_core.callbacks.manager import CallbackManager, trace_as_chain_group
from langchain_core.outputs import LLMResult
from langchain_core.tracers.langchain import LangChainTracer, wait_for_all_tracers
from langchain_openai.llms import BaseOpenAI


def test_callback_manager_configure_context_vars(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test callback manager configuration."""
    monkeypatch.setenv("LANGCHAIN_TRACING_V2", "true")
    monkeypatch.setenv("LANGCHAIN_TRACING", "false")
    with patch.object(LangChainTracer, "_update_run_single"):
        with patch.object(LangChainTracer, "_persist_run_single"):
            with trace_as_chain_group("test") as group_manager:
                assert len(group_manager.handlers) == 1
                tracer = group_manager.handlers[0]
                assert isinstance(tracer, LangChainTracer)

                with get_openai_callback() as cb:
                    # This is a new empty callback handler
                    assert cb.successful_requests == 0
                    assert cb.total_tokens == 0

                    # configure adds this openai cb but doesn't modify the group manager
                    mngr = CallbackManager.configure(group_manager)
                    assert mngr.handlers == [tracer, cb]
                    assert group_manager.handlers == [tracer]

                    response = LLMResult(
                        generations=[],
                        llm_output={
                            "token_usage": {
                                "prompt_tokens": 2,
                                "completion_tokens": 1,
                                "total_tokens": 3,
                            },
                            "model_name": BaseOpenAI.__fields__["model_name"].default,
                        },
                    )
                    mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response)

                    # The callback handler has been updated
                    assert cb.successful_requests == 1
                    assert cb.total_tokens == 3
                    assert cb.prompt_tokens == 2
                    assert cb.completion_tokens == 1
                    assert cb.total_cost > 0

                with get_openai_callback() as cb:
                    # This is a new empty callback handler
                    assert cb.successful_requests == 0
                    assert cb.total_tokens == 0

                    # configure adds this openai cb but doesn't modify the group manager
                    mngr = CallbackManager.configure(group_manager)
                    assert mngr.handlers == [tracer, cb]
                    assert group_manager.handlers == [tracer]

                    response = LLMResult(
                        generations=[],
                        llm_output={
                            "token_usage": {
                                "prompt_tokens": 2,
                                "completion_tokens": 1,
                                "total_tokens": 3,
                            },
                            "model_name": BaseOpenAI.__fields__["model_name"].default,
                        },
                    )
                    mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response)

                    # The callback handler has been updated
                    assert cb.successful_requests == 1
                    assert cb.total_tokens == 3
                    assert cb.prompt_tokens == 2
                    assert cb.completion_tokens == 1
                    assert cb.total_cost > 0
    wait_for_all_tracers()
    assert LangChainTracer._persist_run_single.call_count == 1  # type: ignore
@@ -0,0 +1,37 @@
from langchain_community.callbacks import __all__

EXPECTED_ALL = [
    "AimCallbackHandler",
    "ArgillaCallbackHandler",
    "ArizeCallbackHandler",
    "PromptLayerCallbackHandler",
    "ArthurCallbackHandler",
    "ClearMLCallbackHandler",
    "CometCallbackHandler",
    "ContextCallbackHandler",
    "FileCallbackHandler",
    "HumanApprovalCallbackHandler",
    "InfinoCallbackHandler",
    "MlflowCallbackHandler",
    "LLMonitorCallbackHandler",
    "OpenAICallbackHandler",
    "StdOutCallbackHandler",
    "AsyncIteratorCallbackHandler",
    "StreamingStdOutCallbackHandler",
    "FinalStreamingStdOutCallbackHandler",
    "LLMThoughtLabeler",
    "LangChainTracer",
    "StreamlitCallbackHandler",
    "WandbCallbackHandler",
    "WhyLabsCallbackHandler",
    "get_openai_callback",
    "wandb_tracing_enabled",
    "FlyteCallbackHandler",
    "SageMakerCallbackHandler",
    "LabelStudioCallbackHandler",
    "TrubricsCallbackHandler",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
@@ -1,12 +1,11 @@
import pathlib

from langchain.chat_loaders import slack, utils
from langchain_community.chat_loaders import slack, utils


def test_slack_chat_loader() -> None:
    chat_path = (
        pathlib.Path(__file__).parents[2]
        / "integration_tests"
        / "examples"
        / "slack_export.zip"
    )
@@ -0,0 +1,54 @@
"""Test Anthropic Chat API wrapper."""
from typing import List
from unittest.mock import MagicMock

import pytest

from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
)

from langchain_community.chat_models import BedrockChat
from langchain_community.chat_models.meta import convert_messages_to_prompt_llama


@pytest.mark.parametrize(
    ("messages", "expected"),
    [
        ([HumanMessage(content="Hello")], "[INST] Hello [/INST]"),
        (
            [HumanMessage(content="Hello"), AIMessage(content="Answer:")],
            "[INST] Hello [/INST]\nAnswer:",
        ),
        (
            [
                SystemMessage(content="You're an assistant"),
                HumanMessage(content="Hello"),
                AIMessage(content="Answer:"),
            ],
            "<<SYS>> You're an assistant <</SYS>>\n[INST] Hello [/INST]\nAnswer:",
        ),
    ],
)
def test_formatting(messages: List[BaseMessage], expected: str) -> None:
    result = convert_messages_to_prompt_llama(messages)
    assert result == expected


def test_anthropic_bedrock() -> None:
    client = MagicMock()
    respbody = MagicMock(
        read=MagicMock(
            return_value=MagicMock(
                decode=MagicMock(return_value=b'{"completion":"Hi back"}')
            )
        )
    )
    client.invoke_model.return_value = {"body": respbody}
    model = BedrockChat(model_id="anthropic.claude-v2", client=client)

    # should not throw an error
    model.invoke("hello there")
@@ -0,0 +1,36 @@
from langchain_community.chat_models import __all__

EXPECTED_ALL = [
    "BedrockChat",
    "FakeListChatModel",
    "PromptLayerChatOpenAI",
    "ChatEverlyAI",
    "ChatAnthropic",
    "ChatCohere",
    "ChatDatabricks",
    "ChatGooglePalm",
    "ChatMlflow",
    "ChatMLflowAIGateway",
    "ChatOllama",
    "ChatVertexAI",
    "JinaChat",
    "HumanInputChatModel",
    "MiniMaxChat",
    "ChatAnyscale",
    "ChatLiteLLM",
    "ErnieBotChat",
    "ChatJavelinAIGateway",
    "ChatKonko",
    "PaiEasChatEndpoint",
    "QianfanChatEndpoint",
    "ChatFireworks",
    "ChatYandexGPT",
    "ChatBaichuan",
    "ChatHunyuan",
    "GigaChat",
    "VolcEngineMaasChat",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
@@ -1,17 +1,25 @@
"""Tests for the various PDF parsers."""
from pathlib import Path
from typing import Iterator

import pytest

from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.pdf import (
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers.pdf import (
    PDFMinerParser,
    PyMuPDFParser,
    PyPDFium2Parser,
    PyPDFParser,
)
from tests.data import HELLO_PDF, LAYOUT_PARSER_PAPER_PDF

_THIS_DIR = Path(__file__).parents[3]

_EXAMPLES_DIR = _THIS_DIR / "examples"

# Paths to test PDF files
HELLO_PDF = _EXAMPLES_DIR / "hello.pdf"
LAYOUT_PARSER_PAPER_PDF = _EXAMPLES_DIR / "layout-parser-paper.pdf"


def _assert_with_parser(parser: BaseBlobParser, splits_by_page: bool = True) -> None:
@@ -0,0 +1,59 @@
from langchain_community.embeddings import __all__

EXPECTED_ALL = [
    "CacheBackedEmbeddings",
    "ClarifaiEmbeddings",
    "CohereEmbeddings",
    "DatabricksEmbeddings",
    "ElasticsearchEmbeddings",
    "FastEmbedEmbeddings",
    "HuggingFaceEmbeddings",
    "HuggingFaceInferenceAPIEmbeddings",
    "InfinityEmbeddings",
    "GradientEmbeddings",
    "JinaEmbeddings",
    "LlamaCppEmbeddings",
    "HuggingFaceHubEmbeddings",
    "MlflowAIGatewayEmbeddings",
    "MlflowEmbeddings",
    "ModelScopeEmbeddings",
    "TensorflowHubEmbeddings",
    "SagemakerEndpointEmbeddings",
    "HuggingFaceInstructEmbeddings",
    "MosaicMLInstructorEmbeddings",
    "SelfHostedEmbeddings",
    "SelfHostedHuggingFaceEmbeddings",
    "SelfHostedHuggingFaceInstructEmbeddings",
    "FakeEmbeddings",
    "DeterministicFakeEmbedding",
    "AlephAlphaAsymmetricSemanticEmbedding",
    "AlephAlphaSymmetricSemanticEmbedding",
    "SentenceTransformerEmbeddings",
    "GooglePalmEmbeddings",
    "MiniMaxEmbeddings",
    "VertexAIEmbeddings",
    "BedrockEmbeddings",
    "DeepInfraEmbeddings",
    "EdenAiEmbeddings",
    "DashScopeEmbeddings",
    "EmbaasEmbeddings",
    "OctoAIEmbeddings",
    "SpacyEmbeddings",
    "NLPCloudEmbeddings",
    "GPT4AllEmbeddings",
    "XinferenceEmbeddings",
    "LocalAIEmbeddings",
    "AwaEmbeddings",
    "HuggingFaceBgeEmbeddings",
    "ErnieEmbeddings",
    "JavelinAIGatewayEmbeddings",
    "OllamaEmbeddings",
    "QianfanEmbeddingsEndpoint",
    "JohnSnowLabsEmbeddings",
    "VoyageEmbeddings",
    "BookendEmbeddings",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
@@ -0,0 +1,91 @@
from langchain_core.language_models.llms import BaseLLM

from langchain_community import llms

EXPECT_ALL = [
    "AI21",
    "AlephAlpha",
    "AmazonAPIGateway",
    "Anthropic",
    "Anyscale",
    "Arcee",
    "Aviary",
    "AzureMLOnlineEndpoint",
    "Banana",
    "Baseten",
    "Beam",
    "Bedrock",
    "CTransformers",
    "CTranslate2",
    "CerebriumAI",
    "ChatGLM",
    "Clarifai",
    "Cohere",
    "Databricks",
    "DeepInfra",
    "DeepSparse",
    "EdenAI",
    "FakeListLLM",
    "Fireworks",
    "ForefrontAI",
    "GigaChat",
    "GPT4All",
    "GooglePalm",
    "GooseAI",
    "GradientLLM",
    "HuggingFaceEndpoint",
    "HuggingFaceHub",
    "HuggingFacePipeline",
    "HuggingFaceTextGenInference",
    "HumanInputLLM",
    "KoboldApiLLM",
    "LlamaCpp",
    "TextGen",
    "ManifestWrapper",
    "Minimax",
    "MlflowAIGateway",
    "Modal",
    "MosaicML",
    "Nebula",
    "NIBittensorLLM",
    "NLPCloud",
    "Ollama",
    "OpenLLM",
    "OpenLM",
    "PaiEasEndpoint",
    "Petals",
    "PipelineAI",
    "Predibase",
    "PredictionGuard",
    "PromptLayerOpenAI",
    "PromptLayerOpenAIChat",
    "OpaquePrompts",
    "RWKV",
    "Replicate",
    "SagemakerEndpoint",
    "SelfHostedHuggingFaceLLM",
    "SelfHostedPipeline",
    "StochasticAI",
    "TitanTakeoff",
    "TitanTakeoffPro",
    "Tongyi",
    "VertexAI",
    "VertexAIModelGarden",
    "VLLM",
    "VLLMOpenAI",
    "Writer",
    "OctoAIEndpoint",
    "Xinference",
    "JavelinAIGateway",
    "QianfanLLMEndpoint",
    "YandexGPT",
    "VolcEngineMaasLLM",
    "WatsonxLLM",
]


def test_all_imports() -> None:
    """Simple test to make sure all things can be imported."""
    for cls in llms.__all__:
        assert issubclass(getattr(llms, cls), BaseLLM)
    assert set(llms.__all__) == set(EXPECT_ALL)
@@ -1,10 +1,11 @@
from typing import List, Type

import langchain.tools
from langchain.tools import _DEPRECATED_TOOLS
from langchain.tools import __all__ as tools_all
from langchain_core.tools import BaseTool, StructuredTool

import langchain_community.tools
from langchain_community.tools import _DEPRECATED_TOOLS
from langchain_community.tools import __all__ as tools_all

_EXCLUDE = {
    BaseTool,
    StructuredTool,
@@ -17,7 +18,7 @@ def _get_tool_classes(skip_tools_without_default_names: bool) -> List[Type[BaseT
        if tool_class_name in _DEPRECATED_TOOLS:
            continue
        # Resolve the str to the class
        tool_class = getattr(langchain.tools, tool_class_name)
        tool_class = getattr(langchain_community.tools, tool_class_name)
        if isinstance(tool_class, type) and issubclass(tool_class, BaseTool):
            if tool_class in _EXCLUDE:
                continue
@@ -0,0 +1,728 @@
"""Test FAISS functionality."""
import datetime
import math
import tempfile

import pytest

from typing import Union

from langchain_core.documents import Document

from langchain_community.docstore.base import Docstore
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.faiss import FAISS
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


_PAGE_CONTENT = """This is a page about LangChain.

It is a really cool framework.

What isn't there to love about langchain?

Made in 2022."""


class FakeDocstore(Docstore):
    """Fake docstore for testing purposes."""

    def search(self, search: str) -> Union[str, Document]:
        """Return the fake document."""
        document = Document(page_content=_PAGE_CONTENT)
        return document


@pytest.mark.requires("faiss")
def test_faiss() -> None:
    """Test end to end construction and search."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]


@pytest.mark.requires("faiss")
async def test_faiss_afrom_texts() -> None:
    """Test end to end construction and search."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = await docsearch.asimilarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]


@pytest.mark.requires("faiss")
def test_faiss_vector_sim() -> None:
    """Test vector similarity."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_by_vector(query_vec, k=1)
    assert output == [Document(page_content="foo")]


@pytest.mark.requires("faiss")
async def test_faiss_async_vector_sim() -> None:
    """Test vector similarity."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    output = await docsearch.asimilarity_search_by_vector(query_vec, k=1)
    assert output == [Document(page_content="foo")]


@pytest.mark.requires("faiss")
def test_faiss_vector_sim_with_score_threshold() -> None:
    """Test vector similarity."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2)
    assert output == [Document(page_content="foo")]


@pytest.mark.requires("faiss")
async def test_faiss_vector_async_sim_with_score_threshold() -> None:
    """Test vector similarity."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    output = await docsearch.asimilarity_search_by_vector(
        query_vec, k=2, score_threshold=0.2
    )
    assert output == [Document(page_content="foo")]


@pytest.mark.requires("faiss")
def test_similarity_search_with_score_by_vector() -> None:
    """Test vector similarity with score by vector."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo")


@pytest.mark.requires("faiss")
async def test_similarity_async_search_with_score_by_vector() -> None:
    """Test vector similarity with score by vector."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    output = await docsearch.asimilarity_search_with_score_by_vector(query_vec, k=1)
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo")


@pytest.mark.requires("faiss")
def test_similarity_search_with_score_by_vector_with_score_threshold() -> None:
    """Test vector similarity with score by vector."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_with_score_by_vector(
        query_vec,
        k=2,
        score_threshold=0.2,
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] < 0.2


@pytest.mark.requires("faiss")
async def test_sim_asearch_with_score_by_vector_with_score_threshold() -> None:
    """Test vector similarity with score by vector."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    output = await docsearch.asimilarity_search_with_score_by_vector(
        query_vec,
        k=2,
        score_threshold=0.2,
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] < 0.2


@pytest.mark.requires("faiss")
def test_faiss_mmr() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    query_vec = FakeEmbeddings().embed_query(text="foo")
    # make sure we can have k > docstore size
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1
    )
    assert len(output) == len(texts)
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo")


@pytest.mark.requires("faiss")
async def test_faiss_async_mmr() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    # make sure we can have k > docstore size
    output = await docsearch.amax_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1
    )
    assert len(output) == len(texts)
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo")


@pytest.mark.requires("faiss")
def test_faiss_mmr_with_metadatas() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1
    )
    assert len(output) == len(texts)
    assert output[0][0] == Document(page_content="foo", metadata={"page": 0})
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo", metadata={"page": 0})


@pytest.mark.requires("faiss")
async def test_faiss_async_mmr_with_metadatas() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    output = await docsearch.amax_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1
    )
    assert len(output) == len(texts)
    assert output[0][0] == Document(page_content="foo", metadata={"page": 0})
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo", metadata={"page": 0})


@pytest.mark.requires("faiss")
def test_faiss_mmr_with_metadatas_and_filter() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1, filter={"page": 1}
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo", metadata={"page": 1})
    assert output[0][1] == 0.0


@pytest.mark.requires("faiss")
async def test_faiss_async_mmr_with_metadatas_and_filter() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    output = await docsearch.amax_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1, filter={"page": 1}
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo", metadata={"page": 1})
    assert output[0][1] == 0.0


@pytest.mark.requires("faiss")
def test_faiss_mmr_with_metadatas_and_list_filter() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1, filter={"page": [0, 1, 2]}
    )
    assert len(output) == 3
    assert output[0][0] == Document(page_content="foo", metadata={"page": 0})
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo", metadata={"page": 0})


@pytest.mark.requires("faiss")
async def test_faiss_async_mmr_with_metadatas_and_list_filter() -> None:
    texts = ["foo", "foo", "fou", "foy"]
    metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    query_vec = await FakeEmbeddings().aembed_query(text="foo")
    output = await docsearch.amax_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1, filter={"page": [0, 1, 2]}
    )
    assert len(output) == 3
    assert output[0][0] == Document(page_content="foo", metadata={"page": 0})
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo", metadata={"page": 0})


@pytest.mark.requires("faiss")
def test_faiss_with_metadatas() -> None:
    """Test end to end construction and search."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_docstore = InMemoryDocstore(
        {
            docsearch.index_to_docstore_id[0]: Document(
                page_content="foo", metadata={"page": 0}
            ),
            docsearch.index_to_docstore_id[1]: Document(
                page_content="bar", metadata={"page": 1}
            ),
            docsearch.index_to_docstore_id[2]: Document(
                page_content="baz", metadata={"page": 2}
            ),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": 0})]


@pytest.mark.requires("faiss")
async def test_faiss_async_with_metadatas() -> None:
    """Test end to end construction and search."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_docstore = InMemoryDocstore(
        {
            docsearch.index_to_docstore_id[0]: Document(
                page_content="foo", metadata={"page": 0}
            ),
            docsearch.index_to_docstore_id[1]: Document(
                page_content="bar", metadata={"page": 1}
            ),
            docsearch.index_to_docstore_id[2]: Document(
                page_content="baz", metadata={"page": 2}
            ),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = await docsearch.asimilarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": 0})]


@pytest.mark.requires("faiss")
def test_faiss_with_metadatas_and_filter() -> None:
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_docstore = InMemoryDocstore(
        {
            docsearch.index_to_docstore_id[0]: Document(
                page_content="foo", metadata={"page": 0}
            ),
            docsearch.index_to_docstore_id[1]: Document(
                page_content="bar", metadata={"page": 1}
            ),
            docsearch.index_to_docstore_id[2]: Document(
                page_content="baz", metadata={"page": 2}
            ),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search("foo", k=1, filter={"page": 1})
    assert output == [Document(page_content="bar", metadata={"page": 1})]


@pytest.mark.requires("faiss")
async def test_faiss_async_with_metadatas_and_filter() -> None:
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_docstore = InMemoryDocstore(
        {
            docsearch.index_to_docstore_id[0]: Document(
                page_content="foo", metadata={"page": 0}
            ),
            docsearch.index_to_docstore_id[1]: Document(
                page_content="bar", metadata={"page": 1}
            ),
            docsearch.index_to_docstore_id[2]: Document(
                page_content="baz", metadata={"page": 2}
            ),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = await docsearch.asimilarity_search("foo", k=1, filter={"page": 1})
    assert output == [Document(page_content="bar", metadata={"page": 1})]


@pytest.mark.requires("faiss")
def test_faiss_with_metadatas_and_list_filter() -> None:
    texts = ["foo", "bar", "baz", "foo", "qux"]
    metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_docstore = InMemoryDocstore(
        {
            docsearch.index_to_docstore_id[0]: Document(
                page_content="foo", metadata={"page": 0}
            ),
            docsearch.index_to_docstore_id[1]: Document(
                page_content="bar", metadata={"page": 1}
            ),
            docsearch.index_to_docstore_id[2]: Document(
                page_content="baz", metadata={"page": 2}
            ),
            docsearch.index_to_docstore_id[3]: Document(
                page_content="foo", metadata={"page": 3}
            ),
            docsearch.index_to_docstore_id[4]: Document(
                page_content="qux", metadata={"page": 3}
            ),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search("foor", k=1, filter={"page": [0, 1, 2]})
    assert output == [Document(page_content="foo", metadata={"page": 0})]


@pytest.mark.requires("faiss")
async def test_faiss_async_with_metadatas_and_list_filter() -> None:
    texts = ["foo", "bar", "baz", "foo", "qux"]
    metadatas = [{"page": i} if i <= 3 else {"page": 3} for i in range(len(texts))]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_docstore = InMemoryDocstore(
        {
            docsearch.index_to_docstore_id[0]: Document(
                page_content="foo", metadata={"page": 0}
            ),
            docsearch.index_to_docstore_id[1]: Document(
                page_content="bar", metadata={"page": 1}
            ),
            docsearch.index_to_docstore_id[2]: Document(
                page_content="baz", metadata={"page": 2}
            ),
            docsearch.index_to_docstore_id[3]: Document(
                page_content="foo", metadata={"page": 3}
            ),
            docsearch.index_to_docstore_id[4]: Document(
                page_content="qux", metadata={"page": 3}
            ),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = await docsearch.asimilarity_search("foor", k=1, filter={"page": [0, 1, 2]})
    assert output == [Document(page_content="foo", metadata={"page": 0})]


@pytest.mark.requires("faiss")
def test_faiss_search_not_found() -> None:
    """Test what happens when document is not found."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    # Get rid of the docstore to purposefully induce errors.
    docsearch.docstore = InMemoryDocstore({})
    with pytest.raises(ValueError):
        docsearch.similarity_search("foo")


@pytest.mark.requires("faiss")
async def test_faiss_async_search_not_found() -> None:
    """Test what happens when document is not found."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    # Get rid of the docstore to purposefully induce errors.
    docsearch.docstore = InMemoryDocstore({})
    with pytest.raises(ValueError):
        await docsearch.asimilarity_search("foo")


@pytest.mark.requires("faiss")
def test_faiss_add_texts() -> None:
    """Test end to end adding of texts."""
    # Create initial doc store.
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    # Test adding a similar document as before.
    docsearch.add_texts(["foo"])
    output = docsearch.similarity_search("foo", k=2)
    assert output == [Document(page_content="foo"), Document(page_content="foo")]


@pytest.mark.requires("faiss")
async def test_faiss_async_add_texts() -> None:
    """Test end to end adding of texts."""
    # Create initial doc store.
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    # Test adding a similar document as before.
    await docsearch.aadd_texts(["foo"])
    output = await docsearch.asimilarity_search("foo", k=2)
    assert output == [Document(page_content="foo"), Document(page_content="foo")]


@pytest.mark.requires("faiss")
def test_faiss_add_texts_not_supported() -> None:
    """Test adding of texts to a docstore that doesn't support it."""
    docsearch = FAISS(FakeEmbeddings(), None, FakeDocstore(), {})
    with pytest.raises(ValueError):
        docsearch.add_texts(["foo"])


@pytest.mark.requires("faiss")
async def test_faiss_async_add_texts_not_supported() -> None:
    """Test adding of texts to a docstore that doesn't support it."""
    docsearch = FAISS(FakeEmbeddings(), None, FakeDocstore(), {})
    with pytest.raises(ValueError):
        await docsearch.aadd_texts(["foo"])


@pytest.mark.requires("faiss")
def test_faiss_local_save_load() -> None:
    """Test end to end serialization."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
    with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
        docsearch.save_local(temp_folder)
        new_docsearch = FAISS.load_local(temp_folder, FakeEmbeddings())
    assert new_docsearch.index is not None


@pytest.mark.requires("faiss")
async def test_faiss_async_local_save_load() -> None:
    """Test end to end serialization."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(texts, FakeEmbeddings())
    temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
    with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
        docsearch.save_local(temp_folder)
        new_docsearch = FAISS.load_local(temp_folder, FakeEmbeddings())
    assert new_docsearch.index is not None


@pytest.mark.requires("faiss")
def test_faiss_similarity_search_with_relevance_scores() -> None:
    """Test the similarity search with normalized similarities."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(
        texts,
        FakeEmbeddings(),
        relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2),
    )
    outputs = docsearch.similarity_search_with_relevance_scores("foo", k=1)
    output, score = outputs[0]
    assert output == Document(page_content="foo")
    assert score == 1.0


@pytest.mark.requires("faiss")
async def test_faiss_async_similarity_search_with_relevance_scores() -> None:
    """Test the similarity search with normalized similarities."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(
        texts,
        FakeEmbeddings(),
        relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2),
    )
    outputs = await docsearch.asimilarity_search_with_relevance_scores("foo", k=1)
    output, score = outputs[0]
    assert output == Document(page_content="foo")
    assert score == 1.0


@pytest.mark.requires("faiss")
def test_faiss_similarity_search_with_relevance_scores_with_threshold() -> None:
    """Test the similarity search with normalized similarities with score threshold."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(
        texts,
        FakeEmbeddings(),
        relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2),
    )
    outputs = docsearch.similarity_search_with_relevance_scores(
        "foo", k=2, score_threshold=0.5
    )
    assert len(outputs) == 1
    output, score = outputs[0]
    assert output == Document(page_content="foo")
    assert score == 1.0


@pytest.mark.requires("faiss")
async def test_faiss_asimilarity_search_with_relevance_scores_with_threshold() -> None:
    """Test the similarity search with normalized similarities with score threshold."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(
        texts,
        FakeEmbeddings(),
        relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2),
    )
    outputs = await docsearch.asimilarity_search_with_relevance_scores(
        "foo", k=2, score_threshold=0.5
    )
    assert len(outputs) == 1
    output, score = outputs[0]
    assert output == Document(page_content="foo")
    assert score == 1.0


@pytest.mark.requires("faiss")
def test_faiss_invalid_normalize_fn() -> None:
    """Test the similarity search with normalized similarities."""
    texts = ["foo", "bar", "baz"]
    docsearch = FAISS.from_texts(
        texts, FakeEmbeddings(), relevance_score_fn=lambda _: 2.0
    )
    with pytest.warns(Warning, match="scores must be between"):
        docsearch.similarity_search_with_relevance_scores("foo", k=1)


@pytest.mark.requires("faiss")
async def test_faiss_async_invalid_normalize_fn() -> None:
    """Test the similarity search with normalized similarities."""
    texts = ["foo", "bar", "baz"]
    docsearch = await FAISS.afrom_texts(
        texts, FakeEmbeddings(), relevance_score_fn=lambda _: 2.0
    )
    with pytest.warns(Warning, match="scores must be between"):
        await docsearch.asimilarity_search_with_relevance_scores("foo", k=1)


@pytest.mark.requires("faiss")
def test_missing_normalize_score_fn() -> None:
    """Test doesn't perform similarity search without a valid distance strategy."""
    texts = ["foo", "bar", "baz"]
    faiss_instance = FAISS.from_texts(texts, FakeEmbeddings(), distance_strategy="fake")
    with pytest.raises(ValueError):
        faiss_instance.similarity_search_with_relevance_scores("foo", k=2)


@pytest.mark.requires("faiss")
async def test_async_missing_normalize_score_fn() -> None:
    """Test doesn't perform similarity search without a valid distance strategy."""
    texts = ["foo", "bar", "baz"]
    faiss_instance = await FAISS.afrom_texts(
        texts, FakeEmbeddings(), distance_strategy="fake"
    )
    with pytest.raises(ValueError):
        await faiss_instance.asimilarity_search_with_relevance_scores("foo", k=2)


@pytest.mark.requires("faiss")
def test_delete() -> None:
    """Test the similarity search with normalized similarities."""
    ids = ["a", "b", "c"]
    docsearch = FAISS.from_texts(["foo", "bar", "baz"], FakeEmbeddings(), ids=ids)
    docsearch.delete(ids[1:2])

    result = docsearch.similarity_search("bar", k=2)
    assert sorted([d.page_content for d in result]) == ["baz", "foo"]
    assert docsearch.index_to_docstore_id == {0: ids[0], 1: ids[2]}


@pytest.mark.requires("faiss")
async def test_async_delete() -> None:
    """Test the similarity search with normalized similarities."""
    ids = ["a", "b", "c"]
    docsearch = await FAISS.afrom_texts(
        ["foo", "bar", "baz"], FakeEmbeddings(), ids=ids
    )
    docsearch.delete(ids[1:2])

    result = await docsearch.asimilarity_search("bar", k=2)
    assert sorted([d.page_content for d in result]) == ["baz", "foo"]
    assert docsearch.index_to_docstore_id == {0: ids[0], 1: ids[2]}
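A note on the `relevance_score_fn` used in the relevance-score tests above: it maps a raw FAISS L2 distance onto a [0, 1] relevance score, which is why an exact match scores 1.0. A standalone sketch (illustration only, not part of the diff; the sqrt(2) bound assumes the orthogonal, unit-length vectors that the fake embeddings produce for distinct texts):

import math


# The tests pass relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2).
# With one-hot style fake embeddings, distinct texts embed to orthogonal unit
# vectors, so raw L2 distances fall in [0, sqrt(2)].
def l2_to_relevance(distance: float) -> float:
    """Map an L2 distance in [0, sqrt(2)] to a relevance score in [0, 1]."""
    return 1.0 - distance / math.sqrt(2)


assert l2_to_relevance(0.0) == 1.0  # identical vectors -> perfect relevance
assert abs(l2_to_relevance(math.sqrt(2))) < 1e-9  # orthogonal vectors -> ~0.0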
@@ -0,0 +1,13 @@
from langchain_community import vectorstores
from langchain_core.vectorstores import VectorStore


def test_all_imports() -> None:
    """Simple test to make sure all things can be imported."""
    for cls in vectorstores.__all__:
        if cls not in [
            "AlibabaCloudOpenSearchSettings",
            "ClickhouseSettings",
            "MyScaleSettings",
        ]:
            assert issubclass(getattr(vectorstores, cls), VectorStore)
@@ -0,0 +1,49 @@
"""
**Utility functions** for LangChain.

These functions do not depend on any other LangChain module.
"""

from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
    get_bolded_text,
    get_color_mapping,
    get_colored_text,
    print_text,
)
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
    build_extra_kwargs,
    check_package_version,
    convert_to_secret_str,
    get_pydantic_field_names,
    guard_import,
    mock_now,
    raise_for_status_with_text,
    xor_args,
)

__all__ = [
    "StrictFormatter",
    "check_package_version",
    "convert_to_secret_str",
    "formatter",
    "get_bolded_text",
    "get_color_mapping",
    "get_colored_text",
    "get_pydantic_field_names",
    "guard_import",
    "mock_now",
    "print_text",
    "raise_for_status_with_text",
    "xor_args",
    "try_load_from_hub",
    "build_extra_kwargs",
    "get_from_env",
    "get_from_dict_or_env",
    "stringify_dict",
    "comma_list",
    "stringify_value",
]
@@ -0,0 +1,45 @@
from __future__ import annotations

import os
from typing import Any, Dict, Optional


def env_var_is_set(env_var: str) -> bool:
    """Check if an environment variable is set.

    Args:
        env_var (str): The name of the environment variable.

    Returns:
        bool: True if the environment variable is set, False otherwise.
    """
    return env_var in os.environ and os.environ[env_var] not in (
        "",
        "0",
        "false",
        "False",
    )


def get_from_dict_or_env(
    data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None
) -> str:
    """Get a value from a dictionary or an environment variable."""
    if key in data and data[key]:
        return data[key]
    else:
        return get_from_env(key, env_key, default=default)


def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
    """Get a value from an environment variable or fall back to a default."""
    if env_key in os.environ and os.environ[env_key]:
        return os.environ[env_key]
    elif default is not None:
        return default
    else:
        raise ValueError(
            f"Did not find {key}, please add an environment variable"
            f" `{env_key}` which contains it, or pass"
            f" `{key}` as a named parameter."
        )
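The helpers above resolve configuration in a fixed order: explicit dict value first, then the environment variable, then the default. A short usage sketch (the key names are hypothetical, for illustration only):

import os

# Hypothetical illustration of the helpers above ("api_key"/"MY_API_KEY" are
# made-up names for the example).
os.environ["MY_API_KEY"] = "from-env"

# An explicit, truthy value in the dict wins over the environment variable.
assert get_from_dict_or_env({"api_key": "from-dict"}, "api_key", "MY_API_KEY") == "from-dict"
# An empty or missing dict value falls through to the environment.
assert get_from_dict_or_env({}, "api_key", "MY_API_KEY") == "from-env"
# With neither set, the default is used; without a default, ValueError is raised.
assert get_from_env("api_key", "MISSING_KEY", default="fallback") == "fallback"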
@@ -0,0 +1,28 @@
from langchain_core.utils import __all__

EXPECTED_ALL = [
    "StrictFormatter",
    "check_package_version",
    "convert_to_secret_str",
    "formatter",
    "get_bolded_text",
    "get_color_mapping",
    "get_colored_text",
    "get_pydantic_field_names",
    "guard_import",
    "mock_now",
    "print_text",
    "raise_for_status_with_text",
    "xor_args",
    "try_load_from_hub",
    "build_extra_kwargs",
    "get_from_dict_or_env",
    "get_from_env",
    "stringify_dict",
    "comma_list",
    "stringify_value",
]


def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
@@ -0,0 +1,83 @@
"""**Callback handlers** allow listening to events in LangChain.

**Class hierarchy:**

.. code-block::

    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
"""

from langchain_core.callbacks import StdOutCallbackHandler, StreamingStdOutCallbackHandler
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.context import (
    collect_runs,
    tracing_enabled,
    tracing_v2_enabled,
)

from langchain_community.callbacks.aim_callback import AimCallbackHandler
from langchain_community.callbacks.argilla_callback import ArgillaCallbackHandler
from langchain_community.callbacks.arize_callback import ArizeCallbackHandler
from langchain_community.callbacks.arthur_callback import ArthurCallbackHandler
from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
from langchain_community.callbacks.context_callback import ContextCallbackHandler
from langchain_community.callbacks.file import FileCallbackHandler
from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
from langchain_community.callbacks.human import HumanApprovalCallbackHandler
from langchain_community.callbacks.infino_callback import InfinoCallbackHandler
from langchain_community.callbacks.labelstudio_callback import LabelStudioCallbackHandler
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler
from langchain_community.callbacks.mlflow_callback import MlflowCallbackHandler
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
from langchain_community.callbacks.promptlayer_callback import PromptLayerCallbackHandler
from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHandler
from langchain_community.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain_community.callbacks.streaming_stdout_final_only import (
    FinalStreamingStdOutCallbackHandler,
)
from langchain_community.callbacks.streamlit import LLMThoughtLabeler, StreamlitCallbackHandler
from langchain_community.callbacks.trubrics_callback import TrubricsCallbackHandler
from langchain_community.callbacks.wandb_callback import WandbCallbackHandler
from langchain_community.callbacks.whylabs_callback import WhyLabsCallbackHandler

from langchain_community.callbacks.manager import (
    get_openai_callback,
    wandb_tracing_enabled,
)


__all__ = [
    "AimCallbackHandler",
    "ArgillaCallbackHandler",
    "ArizeCallbackHandler",
    "PromptLayerCallbackHandler",
    "ArthurCallbackHandler",
    "ClearMLCallbackHandler",
    "CometCallbackHandler",
    "ContextCallbackHandler",
    "FileCallbackHandler",
    "HumanApprovalCallbackHandler",
    "InfinoCallbackHandler",
    "MlflowCallbackHandler",
    "LLMonitorCallbackHandler",
    "OpenAICallbackHandler",
    "StdOutCallbackHandler",
    "AsyncIteratorCallbackHandler",
    "StreamingStdOutCallbackHandler",
    "FinalStreamingStdOutCallbackHandler",
    "LLMThoughtLabeler",
    "LangChainTracer",
    "StreamlitCallbackHandler",
    "WandbCallbackHandler",
    "WhyLabsCallbackHandler",
    "get_openai_callback",
    "tracing_enabled",
    "tracing_v2_enabled",
    "collect_runs",
    "wandb_tracing_enabled",
    "FlyteCallbackHandler",
    "SageMakerCallbackHandler",
    "LabelStudioCallbackHandler",
    "TrubricsCallbackHandler",
]
@@ -0,0 +1,68 @@
from __future__ import annotations

from langchain_core.callbacks.manager import (
    AsyncCallbackManager,
    AsyncCallbackManagerForChainGroup,
    AsyncCallbackManagerForChainRun,
    AsyncCallbackManagerForLLMRun,
    AsyncCallbackManagerForRetrieverRun,
    AsyncCallbackManagerForToolRun,
    AsyncParentRunManager,
    AsyncRunManager,
    BaseRunManager,
    CallbackManager,
    CallbackManagerForChainGroup,
    CallbackManagerForChainRun,
    CallbackManagerForLLMRun,
    CallbackManagerForRetrieverRun,
    CallbackManagerForToolRun,
    Callbacks,
    ParentRunManager,
    RunManager,
    ahandle_event,
    atrace_as_chain_group,
    handle_event,
    trace_as_chain_group,
)
from langchain_core.tracers.context import (
    collect_runs,
    tracing_enabled,
    tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
from langchain_community.callbacks.manager import (
    get_openai_callback,
    wandb_tracing_enabled,
)


__all__ = [
    "BaseRunManager",
    "RunManager",
    "ParentRunManager",
    "AsyncRunManager",
    "AsyncParentRunManager",
    "CallbackManagerForLLMRun",
    "AsyncCallbackManagerForLLMRun",
    "CallbackManagerForChainRun",
    "AsyncCallbackManagerForChainRun",
    "CallbackManagerForToolRun",
    "AsyncCallbackManagerForToolRun",
    "CallbackManagerForRetrieverRun",
    "AsyncCallbackManagerForRetrieverRun",
    "CallbackManager",
    "CallbackManagerForChainGroup",
    "AsyncCallbackManager",
    "AsyncCallbackManagerForChainGroup",
    "tracing_enabled",
    "tracing_v2_enabled",
    "collect_runs",
    "atrace_as_chain_group",
    "trace_as_chain_group",
    "handle_event",
    "ahandle_event",
    "Callbacks",
    "env_var_is_set",
    "get_openai_callback",
    "wandb_tracing_enabled",
]
@@ -0,0 +1,36 @@
|
||||
from langchain.callbacks.manager import __all__
|
||||
|
||||
EXPECTED_ALL = [
|
||||
"BaseRunManager",
|
||||
"RunManager",
|
||||
"ParentRunManager",
|
||||
"AsyncRunManager",
|
||||
"AsyncParentRunManager",
|
||||
"CallbackManagerForLLMRun",
|
||||
"AsyncCallbackManagerForLLMRun",
|
||||
"CallbackManagerForChainRun",
|
||||
"AsyncCallbackManagerForChainRun",
|
||||
"CallbackManagerForToolRun",
|
||||
"AsyncCallbackManagerForToolRun",
|
||||
"CallbackManagerForRetrieverRun",
|
||||
"AsyncCallbackManagerForRetrieverRun",
|
||||
"CallbackManager",
|
||||
"CallbackManagerForChainGroup",
|
||||
"AsyncCallbackManager",
|
||||
"AsyncCallbackManagerForChainGroup",
|
||||
"tracing_enabled",
|
||||
"tracing_v2_enabled",
|
||||
"collect_runs",
|
||||
"atrace_as_chain_group",
|
||||
"trace_as_chain_group",
|
||||
"handle_event",
|
||||
"ahandle_event",
|
||||
"env_var_is_set",
|
||||
"Callbacks",
|
||||
"get_openai_callback",
|
||||
"wandb_tracing_enabled",
|
||||
]
|
||||
|
||||
|
||||
def test_all_imports() -> None:
|
||||
assert set(__all__) == set(EXPECTED_ALL)
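    # The set comparison (rather than ordered list equality) keeps this guard
    # order-independent: any symbol added to or dropped from the module's
    # __all__ shows up as a failure here, regardless of its position.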
@@ -0,0 +1,75 @@
"""Test LLM chain."""
from tempfile import TemporaryDirectory
from typing import Dict, List, Union
from unittest.mock import patch

import pytest
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate

from langchain.chains.llm import LLMChain
from tests.unit_tests.llms.fake_llm import FakeLLM


class FakeOutputParser(BaseOutputParser):
    """Fake output parser class for testing."""

    def parse(self, text: str) -> Union[str, List[str], Dict[str, str]]:
        """Parse by splitting."""
        return text.split()


@pytest.fixture
def fake_llm_chain() -> LLMChain:
    """Fake LLM chain for testing purposes."""
    prompt = PromptTemplate(input_variables=["bar"], template="This is a {bar}:")
    return LLMChain(prompt=prompt, llm=FakeLLM(), output_key="text1")


@patch(
    "langchain_community.llms.loading.get_type_to_cls_dict",
    lambda: {"fake": lambda: FakeLLM},
)
def test_serialization(fake_llm_chain: LLMChain) -> None:
    """Test serialization."""
    from langchain.chains.loading import load_chain

    with TemporaryDirectory() as temp_dir:
        file = temp_dir + "/llm.json"
        fake_llm_chain.save(file)
        loaded_chain = load_chain(file)
        assert loaded_chain == fake_llm_chain


def test_missing_inputs(fake_llm_chain: LLMChain) -> None:
    """Test error is raised if inputs are missing."""
    with pytest.raises(ValueError):
        fake_llm_chain({"foo": "bar"})


def test_valid_call(fake_llm_chain: LLMChain) -> None:
    """Test valid call of LLM chain."""
    output = fake_llm_chain({"bar": "baz"})
    assert output == {"bar": "baz", "text1": "foo"}

    # Test with stop words.
    output = fake_llm_chain({"bar": "baz", "stop": ["foo"]})
    # Response should be `bar` now.
    assert output == {"bar": "baz", "stop": ["foo"], "text1": "bar"}


def test_predict_method(fake_llm_chain: LLMChain) -> None:
    """Test predict method works."""
    output = fake_llm_chain.predict(bar="baz")
    assert output == "foo"


def test_predict_and_parse() -> None:
    """Test parsing ability."""
    prompt = PromptTemplate(
        input_variables=["foo"], template="{foo}", output_parser=FakeOutputParser()
    )
    llm = FakeLLM(queries={"foo": "foo bar"})
    chain = LLMChain(prompt=prompt, llm=llm)
    output = chain.predict_and_parse(foo="foo")
    assert output == ["foo", "bar"]
@@ -0,0 +1,114 @@
"""A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping

import pytest
import toml

HERE = Path(__file__).parent

PYPROJECT_TOML = HERE / "../../pyproject.toml"


@pytest.fixture()
def poetry_conf() -> Dict[str, Any]:
    """Load the pyproject.toml file."""
    with open(PYPROJECT_TOML) as f:
        return toml.load(f)["tool"]["poetry"]


def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
    """A test that checks if a new non-optional dependency is being introduced.

    If this test is triggered, it means that a contributor is trying to introduce a new
    required dependency. This should be avoided in most situations.
    """
    # Get the dependencies from the [tool.poetry.dependencies] section
    dependencies = poetry_conf["dependencies"]

    is_required = {
        package_name: isinstance(requirements, str)
        or not requirements.get("optional", False)
        for package_name, requirements in dependencies.items()
    }
    required_dependencies = [
        package_name for package_name, required in is_required.items() if required
    ]

    assert sorted(required_dependencies) == sorted(
        [
            "PyYAML",
            "SQLAlchemy",
            "aiohttp",
            "async-timeout",
            "dataclasses-json",
            "jsonpatch",
            "langchain-core",
            "langsmith",
            "numpy",
            "pydantic",
            "python",
            "requests",
            "tenacity",
            "langchain-community",
            "langchain-openai",
        ]
    )

    unrequired_dependencies = [
        package_name for package_name, required in is_required.items() if not required
    ]
    in_extras = [dep for group in poetry_conf["extras"].values() for dep in group]
    assert set(unrequired_dependencies) == set(in_extras)
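
# Worked illustration of the is_required comprehension above (toy data, not the
# real pyproject.toml): a plain version string always marks a required
# dependency, while a table counts as optional only when it sets optional = true.
#
#     dependencies = {
#         "requests": "^2",                               # string -> required
#         "openai": {"version": "^1", "optional": True},  # table -> extra
#     }
#     is_required == {"requests": True, "openai": False}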


def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
    """Check if someone is attempting to add additional test dependencies.

    Only dependencies associated with test running infrastructure should be added
    to the test group; e.g., pytest, pytest-cov etc.

    Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
    """

    test_group_deps = sorted(poetry_conf["group"]["test"]["dependencies"])

    assert test_group_deps == sorted(
        [
            "duckdb-engine",
            "freezegun",
            "langchain-core",
            "lark",
            "pandas",
            "pytest",
            "pytest-asyncio",
            "pytest-cov",
            "pytest-dotenv",
            "pytest-mock",
            "pytest-socket",
            "pytest-watcher",
            "responses",
            "syrupy",
            "requests-mock",
        ]
    )


def test_imports() -> None:
    """Test that you can import all top level things okay."""
    from langchain_core.prompts import BasePromptTemplate  # noqa: F401

    from langchain.agents import OpenAIFunctionsAgent  # noqa: F401
    from langchain.callbacks import OpenAICallbackHandler  # noqa: F401
    from langchain.chains import LLMChain  # noqa: F401
    from langchain.chat_models import ChatOpenAI  # noqa: F401
    from langchain.document_loaders import BSHTMLLoader  # noqa: F401
    from langchain.embeddings import OpenAIEmbeddings  # noqa: F401
    from langchain.llms import OpenAI  # noqa: F401
    from langchain.retrievers import VespaRetriever  # noqa: F401
    from langchain.tools import DuckDuckGoSearchResults  # noqa: F401
    from langchain.utilities import (
        SearchApiAPIWrapper,  # noqa: F401
        SerpAPIWrapper,  # noqa: F401
    )
    from langchain.vectorstores import FAISS  # noqa: F401
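    # The F401 suppressions above are deliberate: the imports themselves are
    # the test. Each line exercises a top-level re-export path, so the linter's
    # unused-import check has to be silenced for every one of them.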
@@ -0,0 +1,20 @@
from langchain_openai.chat_models import AzureChatOpenAI, ChatOpenAI, _import_tiktoken
from langchain_openai.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
from langchain_openai.functions import (
    convert_pydantic_to_openai_function,
    convert_pydantic_to_openai_tool,
)
from langchain_openai.llms import AzureOpenAI, BaseOpenAI, OpenAI

__all__ = [
    "_import_tiktoken",
    "OpenAI",
    "AzureOpenAI",
    "ChatOpenAI",
    "AzureChatOpenAI",
    "OpenAIEmbeddings",
    "AzureOpenAIEmbeddings",
    "convert_pydantic_to_openai_function",
    "convert_pydantic_to_openai_tool",
    "BaseOpenAI",
]
@@ -0,0 +1,17 @@
from langchain_openai.chat_models.azure import AzureChatOpenAI
from langchain_openai.chat_models.base import (
    ChatOpenAI,
    _convert_delta_to_message_chunk,
    _create_retry_decorator,
    _import_tiktoken,
    acompletion_with_retry,
)

__all__ = [
    "_create_retry_decorator",
    "acompletion_with_retry",
    "_convert_delta_to_message_chunk",
    "_import_tiktoken",
    "ChatOpenAI",
    "AzureChatOpenAI",
]
@@ -0,0 +1,21 @@
from langchain_openai.embeddings.azure import AzureOpenAIEmbeddings
from langchain_openai.embeddings.base import (
    OpenAIEmbeddings,
    _async_retry_decorator,
    _check_response,
    _create_retry_decorator,
    _is_openai_v1,
    async_embed_with_retry,
    embed_with_retry,
)

__all__ = [
    "_create_retry_decorator",
    "_async_retry_decorator",
    "_check_response",
    "embed_with_retry",
    "async_embed_with_retry",
    "_is_openai_v1",
    "OpenAIEmbeddings",
    "AzureOpenAIEmbeddings",
]
@@ -0,0 +1,27 @@
from langchain_openai.llms.base import (
    AzureOpenAI,
    BaseOpenAI,
    OpenAI,
    OpenAIChat,
    _create_retry_decorator,
    _stream_response_to_generation_chunk,
    _streaming_response_template,
    _update_response,
    acompletion_with_retry,
    completion_with_retry,
    update_token_usage,
)

__all__ = [
    "update_token_usage",
    "_stream_response_to_generation_chunk",
    "_update_response",
    "_streaming_response_template",
    "_create_retry_decorator",
    "completion_with_retry",
    "acompletion_with_retry",
    "OpenAIChat",
    "OpenAI",
    "AzureOpenAI",
    "BaseOpenAI",
]
@@ -6,9 +6,7 @@ import pytest
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatResult, LLMResult

from langchain.callbacks.manager import CallbackManager
from langchain.chat_models import AzureChatOpenAI
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
from langchain_openai.chat_models import AzureChatOpenAI

OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
@@ -71,52 +69,6 @@ def test_chat_openai_multiple_completions() -> None:
        assert isinstance(generation.message.content, str)


@pytest.mark.scheduled
def test_chat_openai_streaming() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = _get_llm(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Hello")
    response = chat([message])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, BaseMessage)


@pytest.mark.scheduled
def test_chat_openai_streaming_generation_info() -> None:
    """Test that generation info is preserved when streaming."""

    class _FakeCallback(FakeCallbackHandler):
        saved_things: dict = {}

        def on_llm_end(
            self,
            *args: Any,
            **kwargs: Any,
        ) -> Any:
            # Save the generation
            self.saved_things["generation"] = args[0]

    callback = _FakeCallback()
    callback_manager = CallbackManager([callback])
    chat = _get_llm(
        max_tokens=2,
        temperature=0,
        callback_manager=callback_manager,
    )
    list(chat.stream("hi"))
    generation = callback.saved_things["generation"]
    # `Hello!` is two tokens, assert that that is what is returned
    assert generation.generations[0][0].text == "Hello!"


@pytest.mark.scheduled
async def test_async_chat_openai() -> None:
    """Test async generation."""
@@ -133,31 +85,6 @@ async def test_async_chat_openai() -> None:
            assert generation.text == generation.message.content


@pytest.mark.scheduled
async def test_async_chat_openai_streaming() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = _get_llm(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Hello")
    response = await chat.agenerate([[message], [message]])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    for generations in response.generations:
        assert len(generations) == 1
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content


@pytest.mark.scheduled
def test_openai_streaming(llm: AzureChatOpenAI) -> None:
    """Test streaming tokens from OpenAI."""
@@ -1,26 +1,16 @@
"""Test ChatOpenAI wrapper."""
from typing import Any, List, Optional, Union
from typing import Optional

import pytest
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.outputs import (
    ChatGeneration,
    ChatGenerationChunk,
    ChatResult,
    GenerationChunk,
    LLMResult,
)
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain.chains.openai_functions import (
    create_openai_fn_chain,
)
from langchain.chat_models.openai import ChatOpenAI
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
from langchain_openai.chat_models import ChatOpenAI


@pytest.mark.scheduled
@@ -94,51 +84,6 @@ def test_chat_openai_multiple_completions() -> None:
        assert isinstance(generation.message.content, str)


@pytest.mark.scheduled
def test_chat_openai_streaming() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = ChatOpenAI(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Hello")
    response = chat([message])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, BaseMessage)


@pytest.mark.scheduled
def test_chat_openai_streaming_generation_info() -> None:
    """Test that generation info is preserved when streaming."""

    class _FakeCallback(FakeCallbackHandler):
        saved_things: dict = {}

        def on_llm_end(
            self,
            *args: Any,
            **kwargs: Any,
        ) -> Any:
            # Save the generation
            self.saved_things["generation"] = args[0]

    callback = _FakeCallback()
    callback_manager = CallbackManager([callback])
    chat = ChatOpenAI(
        max_tokens=2,
        temperature=0,
        callback_manager=callback_manager,
    )
    list(chat.stream("hi"))
    generation = callback.saved_things["generation"]
    # `Hello!` is two tokens, assert that that is what is returned
    assert generation.generations[0][0].text == "Hello!"


def test_chat_openai_llm_output_contains_model_name() -> None:
    """Test llm_output contains model_name."""
@@ -187,131 +132,6 @@ async def test_async_chat_openai() -> None:
            assert generation.text == generation.message.content


@pytest.mark.scheduled
async def test_async_chat_openai_streaming() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = ChatOpenAI(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Hello")
    response = await chat.agenerate([[message], [message]])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    for generations in response.generations:
        assert len(generations) == 1
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content


@pytest.mark.scheduled
async def test_async_chat_openai_streaming_with_function() -> None:
    """Test ChatOpenAI wrapper with multiple completions."""

    class MyCustomAsyncHandler(AsyncCallbackHandler):
        def __init__(self) -> None:
            super().__init__()
            self._captured_tokens: List[str] = []
            self._captured_chunks: List[
                Optional[Union[ChatGenerationChunk, GenerationChunk]]
            ] = []

        def on_llm_new_token(
            self,
            token: str,
            *,
            chunk: Optional[Union[ChatGenerationChunk, GenerationChunk]] = None,
            **kwargs: Any,
        ) -> Any:
            self._captured_tokens.append(token)
            self._captured_chunks.append(chunk)

    json_schema = {
        "title": "Person",
        "description": "Identifying information about a person.",
        "type": "object",
        "properties": {
            "name": {
                "title": "Name",
                "description": "The person's name",
                "type": "string",
            },
            "age": {
                "title": "Age",
                "description": "The person's age",
                "type": "integer",
            },
            "fav_food": {
                "title": "Fav Food",
                "description": "The person's favorite food",
                "type": "string",
            },
        },
        "required": ["name", "age"],
    }

    callback_handler = MyCustomAsyncHandler()
    callback_manager = CallbackManager([callback_handler])

    chat = ChatOpenAI(
        max_tokens=10,
        n=1,
        callback_manager=callback_manager,
        streaming=True,
    )

    prompt_msgs = [
        SystemMessage(
            content="You are a world class algorithm for "
            "extracting information in structured formats."
        ),
        HumanMessage(
            content="Use the given format to extract "
            "information from the following input:"
        ),
        HumanMessagePromptTemplate.from_template("{input}"),
        HumanMessage(content="Tips: Make sure to answer in the correct format"),
    ]
    prompt = ChatPromptTemplate(messages=prompt_msgs)

    function: Any = {
        "name": "output_formatter",
        "description": (
            "Output formatter. Should always be used to format your response to the"
            " user."
        ),
        "parameters": json_schema,
    }
    chain = create_openai_fn_chain(
        [function],
        chat,
        prompt,
        output_parser=None,
    )

    message = HumanMessage(content="Sally is 13 years old")
    response = await chain.agenerate([{"input": message}])

    assert isinstance(response, LLMResult)
    assert len(response.generations) == 1
    for generations in response.generations:
        assert len(generations) == 1
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content
    assert len(callback_handler._captured_tokens) > 0
    assert len(callback_handler._captured_chunks) > 0
    assert all([chunk is not None for chunk in callback_handler._captured_chunks])


@pytest.mark.scheduled
async def test_async_chat_openai_bind_functions() -> None:
@@ -339,17 +159,13 @@ async def test_async_chat_openai_bind_functions() -> None:
        ]
    )

    chain = prompt | chat | JsonOutputFunctionsParser(args_only=True)
    chain = prompt | chat

    message = HumanMessage(content="Sally is 13 years old")
    response = await chain.abatch([{"input": message}])

    assert isinstance(response, list)
    assert len(response) == 1
    for generation in response:
        assert isinstance(generation, dict)
        assert "name" in generation
        assert "age" in generation


def test_chat_openai_extra_kwargs() -> None:
@@ -5,9 +5,7 @@ from typing import Any, Generator
import pytest
from langchain_core.outputs import LLMResult

from langchain.callbacks.manager import CallbackManager
from langchain.llms import AzureOpenAI
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
from langchain_openai.llms import AzureOpenAI

OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
@@ -135,40 +133,9 @@ def test_openai_streaming_call() -> None:
    assert isinstance(output, str)


def test_openai_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    llm = _get_llm(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    llm("Write me a sentence with 100 words.")
    assert callback_handler.llm_streams == 11


@pytest.mark.scheduled
async def test_openai_async_generate() -> None:
    """Test async generation."""
    llm = _get_llm(max_tokens=10)
    output = await llm.agenerate(["Hello, how are you?"])
    assert isinstance(output, LLMResult)


async def test_openai_async_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    llm = _get_llm(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    result = await llm.agenerate(["Write me a sentence with 100 words."])
    assert callback_handler.llm_streams == 11
    assert isinstance(result, LLMResult)
@@ -1,17 +1,11 @@
"""Test OpenAI API wrapper."""
from pathlib import Path
from typing import Generator

import pytest
from langchain_core.outputs import LLMResult

from langchain.callbacks.manager import CallbackManager
from langchain.chat_models.openai import ChatOpenAI
from langchain.llms.loading import load_llm
from langchain.llms.openai import OpenAI
from tests.unit_tests.callbacks.fake_callback_handler import (
    FakeCallbackHandler,
)
from langchain_openai.chat_models import ChatOpenAI
from langchain_openai.llms import OpenAI


@pytest.mark.scheduled
@@ -79,14 +73,6 @@ def test_openai_stop_error() -> None:
    llm("write an ordered list of five items", stop=["\n"])


def test_saving_loading_llm(tmp_path: Path) -> None:
    """Test saving/loading an OpenAI LLM."""
    llm = OpenAI(max_tokens=10)
    llm.save(file_path=tmp_path / "openai.yaml")
    loaded_llm = load_llm(tmp_path / "openai.yaml")
    assert loaded_llm == llm


@pytest.mark.scheduled
def test_openai_streaming() -> None:
    """Test streaming tokens from OpenAI."""
@@ -193,20 +179,6 @@ def test_openai_streaming_call() -> None:
    assert isinstance(output, str)


def test_openai_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    llm = OpenAI(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    llm("Write me a sentence with 100 words.")
    assert callback_handler.llm_streams == 10


@pytest.mark.scheduled
async def test_openai_async_generate() -> None:
@@ -216,21 +188,6 @@ async def test_openai_async_generate() -> None:
    assert isinstance(output, LLMResult)


async def test_openai_async_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    llm = OpenAI(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    result = await llm.agenerate(["Write me a sentence with 100 words."])
    assert callback_handler.llm_streams == 10
    assert isinstance(result, LLMResult)


def test_openai_modelname_to_contextsize_valid() -> None:
    """Test model name to context size on a valid model."""
@@ -4,15 +4,11 @@ from typing import Any
from unittest.mock import MagicMock, patch

import pytest
from langchain_core.language_models import llms as base
from tenacity import wait_none

from langchain.llms import base
from langchain.llms.openai import OpenAI
from langchain.utils.openai import is_openai_v1
from tests.unit_tests.callbacks.fake_callback_handler import (
    FakeAsyncCallbackHandler,
    FakeCallbackHandler,
)
from langchain_openai.llms import OpenAI
from langchain_openai.utils import is_openai_v1

os.environ["OPENAI_API_KEY"] = "foo"

@@ -24,7 +20,6 @@ def _openai_v1_installed() -> bool:
        return False


@pytest.mark.requires("openai")
def test_openai_model_param() -> None:
    llm = OpenAI(model="foo")
    assert llm.model_name == "foo"
@@ -32,19 +27,16 @@ def test_openai_model_param() -> None:
    assert llm.model_name == "foo"


@pytest.mark.requires("openai")
def test_openai_model_kwargs() -> None:
    llm = OpenAI(model_kwargs={"foo": "bar"})
    assert llm.model_kwargs == {"foo": "bar"}


@pytest.mark.requires("openai")
def test_openai_invalid_model_kwargs() -> None:
    with pytest.raises(ValueError):
        OpenAI(model_kwargs={"model_name": "foo"})


@pytest.mark.requires("openai")
def test_openai_incorrect_field() -> None:
    with pytest.warns(match="not default parameter"):
        llm = OpenAI(foo="bar")
@@ -78,7 +70,6 @@ def _patched_retry(*args: Any, **kwargs: Any) -> Any:
@pytest.mark.skipif(
    _openai_v1_installed(), reason="Retries only handled by LangChain for openai<1"
)
@pytest.mark.requires("openai")
def test_openai_retries(mock_completion: dict) -> None:
    llm = OpenAI()
    mock_client = MagicMock()
@@ -95,7 +86,6 @@ def test_openai_retries(mock_completion: dict) -> None:
        return mock_completion

    mock_client.create = raise_once
    callback_handler = FakeCallbackHandler()

    # Patch the retry to avoid waiting during a unit test
    with patch.object(base, "retry", _patched_retry):
@@ -104,17 +94,15 @@ def test_openai_retries(mock_completion: dict) -> None:
            "client",
            mock_client,
        ):
            res = llm.predict("bar", callbacks=[callback_handler])
            res = llm.predict("bar")
    assert res == "Bar Baz"
    assert completed
    assert raised
    assert callback_handler.retries == 1


@pytest.mark.skipif(
    _openai_v1_installed(), reason="Retries only handled by LangChain for openai<1"
)
@pytest.mark.requires("openai")
async def test_openai_async_retries(mock_completion: dict) -> None:
    llm = OpenAI()
    mock_client = MagicMock()
@@ -132,7 +120,6 @@ async def test_openai_async_retries(mock_completion: dict) -> None:
        return mock_completion

    mock_client.acreate = araise_once
    callback_handler = FakeAsyncCallbackHandler()
    # Patch the retry to avoid waiting during a unit test
    with patch.object(base, "retry", _patched_retry):
        with patch.object(
@@ -140,8 +127,7 @@ async def test_openai_async_retries(mock_completion: dict) -> None:
            "client",
            mock_client,
        ):
            res = await llm.apredict("bar", callbacks=[callback_handler])
            res = await llm.apredict("bar")
    assert res == "Bar Baz"
    assert completed
    assert raised
    assert callback_handler.retries == 1
.scripts/community_split/script_integrations.sh (new executable file, 318 lines)
@@ -0,0 +1,318 @@
#!/bin/bash

cd libs

# cleanup anything existing
git checkout master -- langchain/{langchain,tests}
git checkout master -- core/{langchain_core,tests}
git checkout master -- experimental/{langchain_experimental,tests}
rm -rf community/{langchain_community,tests}
rm -rf partners/openai/{langchain_openai,tests}

# make new dirs
mkdir -p community/langchain_community
touch community/langchain_community/__init__.py
touch community/README.md
mkdir -p community/tests
touch community/tests/__init__.py
mkdir community/tests/unit_tests
touch community/tests/unit_tests/__init__.py
mkdir community/tests/integration_tests/
touch community/tests/integration_tests/__init__.py
mkdir -p community/langchain_community/utils
touch community/langchain_community/utils/__init__.py
mkdir -p community/tests/unit_tests/utils
touch community/tests/unit_tests/utils/__init__.py
mkdir -p community/langchain_community/indexes
touch community/langchain_community/indexes/__init__.py
mkdir community/tests/unit_tests/indexes
touch community/tests/unit_tests/indexes/__init__.py

# import core stuff from core
cd langchain

git grep -l 'from langchain.pydantic_v1' | xargs sed -i '' 's/from langchain.pydantic_v1/from langchain_core.pydantic_v1/g'
git grep -l 'from langchain.tools.base' | xargs sed -i '' 's/from langchain.tools.base/from langchain_core.tools/g'
git grep -l 'from langchain.chat_models.base' | xargs sed -i '' 's/from langchain.chat_models.base/from langchain_core.language_models.chat_models/g'
git grep -l 'from langchain.llms.base' | xargs sed -i '' 's/from langchain.llms.base/from langchain_core.language_models.llms/g'
git grep -l 'from langchain.embeddings.base' | xargs sed -i '' 's/from langchain.embeddings.base/from langchain_core.embeddings/g'
git grep -l 'from langchain.vectorstores.base' | xargs sed -i '' 's/from langchain.vectorstores.base/from langchain_core.vectorstores/g'
git grep -l 'from langchain.agents.tools' | xargs sed -i '' 's/from langchain.agents.tools/from langchain_core.tools/g'
git grep -l 'from langchain.schema.output' | xargs sed -i '' 's/from langchain.schema.output/from langchain_core.outputs/g'
git grep -l 'from langchain.schema.messages' | xargs sed -i '' 's/from langchain.schema.messages/from langchain_core.messages/g'
git grep -l 'from langchain.schema.embeddings' | xargs sed -i '' 's/from langchain.schema.embeddings/from langchain_core.embeddings/g'
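# note: the empty '' after -i is BSD/macOS sed syntax for in-place editing;
# GNU sed takes a bare -i with no suffix argument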

# mv stuff to community
cd ..

mv langchain/langchain/chat_loaders community/langchain_community
mv langchain/langchain/document_loaders community/langchain_community
mv langchain/langchain/docstore community/langchain_community
mv langchain/langchain/document_transformers community/langchain_community
mv langchain/langchain/embeddings community/langchain_community
mv langchain/langchain/graphs community/langchain_community
mv langchain/langchain/llms community/langchain_community
mv langchain/langchain/chat_models community/langchain_community
mv langchain/langchain/memory/chat_message_histories community/langchain_community
mv langchain/langchain/storage community/langchain_community
mv langchain/langchain/tools community/langchain_community
mv langchain/langchain/utilities community/langchain_community
mv langchain/langchain/vectorstores community/langchain_community
mv langchain/langchain/adapters community/langchain_community
mv langchain/langchain/agents/agent_toolkits community/langchain_community
mv langchain/langchain/cache.py community/langchain_community
mv langchain/langchain/callbacks community/langchain_community/callbacks
mv langchain/langchain/indexes/base.py community/langchain_community/indexes
mv langchain/langchain/indexes/_sql_record_manager.py community/langchain_community/indexes
mv langchain/langchain/utils/math.py community/langchain_community/utils

# mv stuff to openai
mv langchain/langchain/utils/openai.py partners/openai/langchain_openai/utils.py
mv langchain/langchain/utils/openai_functions.py partners/openai/langchain_openai/functions.py

# mv stuff to core
mv langchain/langchain/utils/json_schema.py core/langchain_core/utils
mv langchain/langchain/utils/html.py core/langchain_core/utils
mv langchain/langchain/utils/strings.py core/langchain_core/utils
cat langchain/langchain/utils/env.py >> core/langchain_core/utils/env.py
rm langchain/langchain/utils/env.py

# mv unit tests to community
mv langchain/tests/unit_tests/chat_loaders community/tests/unit_tests
mv langchain/tests/unit_tests/document_loaders community/tests/unit_tests
mv langchain/tests/unit_tests/docstore community/tests/unit_tests
mv langchain/tests/unit_tests/document_transformers community/tests/unit_tests
mv langchain/tests/unit_tests/embeddings community/tests/unit_tests
mv langchain/tests/unit_tests/graphs community/tests/unit_tests
mv langchain/tests/unit_tests/llms community/tests/unit_tests
mv langchain/tests/unit_tests/chat_models community/tests/unit_tests
mv langchain/tests/unit_tests/memory/chat_message_histories community/tests/unit_tests
mv langchain/tests/unit_tests/storage community/tests/unit_tests
mv langchain/tests/unit_tests/tools community/tests/unit_tests
mv langchain/tests/unit_tests/utilities community/tests/unit_tests
mv langchain/tests/unit_tests/vectorstores community/tests/unit_tests
mv langchain/tests/unit_tests/callbacks community/tests/unit_tests
mv langchain/tests/unit_tests/indexes/test_sql_record_manager.py community/tests/unit_tests/indexes
mv langchain/tests/unit_tests/utils/test_math.py community/tests/unit_tests/utils

# cp some test helpers back to langchain
mkdir -p langchain/tests/unit_tests/llms
cp {community,langchain}/tests/unit_tests/llms/fake_llm.py
cp {community,langchain}/tests/unit_tests/llms/fake_chat_model.py
mkdir -p langchain/tests/unit_tests/callbacks
cp {community,langchain}/tests/unit_tests/callbacks/fake_callback_handler.py

# mv unit tests to core
mv langchain/tests/unit_tests/utils/test_json_schema.py core/tests/unit_tests/utils
mv langchain/tests/unit_tests/utils/test_html.py core/tests/unit_tests/utils

# mv integration tests to community
mv langchain/tests/integration_tests/document_loaders community/tests/integration_tests
mv langchain/tests/integration_tests/embeddings community/tests/integration_tests
mv langchain/tests/integration_tests/graphs community/tests/integration_tests
mv langchain/tests/integration_tests/llms community/tests/integration_tests
mv langchain/tests/integration_tests/chat_models community/tests/integration_tests
mv langchain/tests/integration_tests/memory/chat_message_histories community/tests/integration_tests
mv langchain/tests/integration_tests/storage community/tests/integration_tests
mv langchain/tests/integration_tests/tools community/tests/integration_tests
mv langchain/tests/integration_tests/utilities community/tests/integration_tests
mv langchain/tests/integration_tests/vectorstores community/tests/integration_tests
mv langchain/tests/integration_tests/adapters community/tests/integration_tests
mv langchain/tests/integration_tests/callbacks community/tests/integration_tests
mv langchain/tests/integration_tests/{test_kuzu,test_nebulagraph}.py community/tests/integration_tests/graphs
touch community/tests/integration_tests/{chat_message_histories,tools}/__init__.py

# import new core stuff from core (everywhere)
git grep -l 'from langchain.utils.json_schema' | xargs sed -i '' 's/from langchain.utils.json_schema/from langchain_core.utils.json_schema/g'
git grep -l 'from langchain.utils.html' | xargs sed -i '' 's/from langchain.utils.html/from langchain_core.utils.html/g'
git grep -l 'from langchain.utils.strings' | xargs sed -i '' 's/from langchain.utils.strings/from langchain_core.utils.strings/g'
git grep -l 'from langchain.utils.env' | xargs sed -i '' 's/from langchain.utils.env/from langchain_core.utils.env/g'

git add community
cd community

# import core stuff from core
git grep -l 'from langchain.pydantic_v1' | xargs sed -i '' 's/from langchain.pydantic_v1/from langchain_core.pydantic_v1/g'
git grep -l 'from langchain.callbacks.base' | xargs sed -i '' 's/from langchain.callbacks.base/from langchain_core.callbacks/g'
git grep -l 'from langchain.callbacks.stdout' | xargs sed -i '' 's/from langchain.callbacks.stdout/from langchain_core.callbacks/g'
git grep -l 'from langchain.callbacks.streaming_stdout' | xargs sed -i '' 's/from langchain.callbacks.streaming_stdout/from langchain_core.callbacks/g'
git grep -l 'from langchain.callbacks.manager' | xargs sed -i '' 's/from langchain.callbacks.manager/from langchain_core.callbacks/g'
git grep -l 'from langchain.callbacks.tracers.base' | xargs sed -i '' 's/from langchain.callbacks.tracers.base/from langchain_core.tracers/g'
git grep -l 'from langchain.tools.base' | xargs sed -i '' 's/from langchain.tools.base/from langchain_core.tools/g'
git grep -l 'from langchain.agents.tools' | xargs sed -i '' 's/from langchain.agents.tools/from langchain_core.tools/g'
git grep -l 'from langchain.schema.output' | xargs sed -i '' 's/from langchain.schema.output/from langchain_core.outputs/g'
git grep -l 'from langchain.schema.messages' | xargs sed -i '' 's/from langchain.schema.messages/from langchain_core.messages/g'

# import openai stuff from openai
git grep -l 'from langchain.utils.math' | xargs sed -i '' 's/from langchain.utils.math/from langchain_community.utils.math/g'
git grep -l 'from langchain.utils.openai_functions' | xargs sed -i '' 's/from langchain.utils.openai_functions/from langchain_openai.functions/g'
git grep -l 'from langchain.utils.openai' | xargs sed -i '' 's/from langchain.utils.openai/from langchain_openai.utils/g'
git grep -l 'from langchain.chat_models.openai' | xargs sed -i '' 's/from\ langchain.chat_models.openai/from langchain_openai.chat_models/g'
git grep -l 'from langchain.chat_models import ChatOpenAI' | xargs sed -i '' 's/from\ langchain.chat_models\ import\ ChatOpenAI/from langchain_openai.chat_models import ChatOpenAI/g'
git grep -l 'from langchain.chat_models import AzureChatOpenAI' | xargs sed -i '' 's/from\ langchain.chat_models\ import\ AzureChatOpenAI/from langchain_openai.chat_models import AzureChatOpenAI/g'
git grep -l 'from langchain.llms import OpenAI' | xargs sed -i '' 's/from\ langchain.llms\ import\ OpenAI/from langchain_openai.llms import OpenAI/g'
git grep -l 'from langchain.llms import AzureOpenAI' | xargs sed -i '' 's/from\ langchain.llms\ import\ AzureOpenAI/from langchain_openai.llms import AzureOpenAI/g'
git grep -l 'from langchain.embeddings import OpenAIEmbeddings' | xargs sed -i '' 's/from\ langchain.embeddings\ import\ OpenAIEmbeddings/from langchain_openai.embeddings import OpenAIEmbeddings/g'
git grep -l 'from langchain.embeddings import AzureOpenAIEmbeddings' | xargs sed -i '' 's/from\ langchain.embeddings\ import\ AzureOpenAIEmbeddings/from langchain_openai.embeddings import AzureOpenAIEmbeddings/g'
git grep -l 'from langchain.chat_models.azure_openai' | xargs sed -i '' 's/from\ langchain.chat_models.azure_openai/from langchain_openai.chat_models/g'
git grep -l 'from langchain.embeddings.openai' | xargs sed -i '' 's/from langchain.embeddings.openai/from langchain_openai.embeddings/g'
git grep -l 'from langchain.embeddings.azure_openai' | xargs sed -i '' 's/from langchain.embeddings.azure_openai/from langchain_openai.embeddings/g'
git grep -l 'from langchain.adapters.openai' | xargs sed -i '' 's/from langchain.adapters.openai/from langchain_openai.adapters/g'
git grep -l 'from langchain.adapters import openai' | xargs sed -i '' 's/from\ langchain.adapters\ import\ openai/from\ langchain_openai\ import\ adapters/g'
git grep -l 'from langchain.llms.openai' | xargs sed -i '' 's/from langchain.llms.openai/from langchain_openai.llms/g'
git grep -l 'from langchain.utils' | xargs sed -i '' 's/from langchain.utils/from langchain_core.utils/g'
git grep -l 'from langchain\.' | xargs sed -i '' 's/from langchain\./from langchain_community./g'
git grep -l 'from langchain_community.memory.chat_message_histories' | xargs sed -i '' 's/from langchain_community.memory.chat_message_histories/from langchain_community.chat_message_histories/g'
git grep -l 'from langchain_community.agents.agent_toolkits' | xargs sed -i '' 's/from langchain_community.agents.agent_toolkits/from langchain_community.agent_toolkits/g'

git grep -l 'from langchain_community\.text_splitter' | xargs sed -i '' 's/from langchain_community\.text_splitter/from langchain.text_splitter/g'
git grep -l 'from langchain_community\.chains' | xargs sed -i '' 's/from langchain_community\.chains/from langchain.chains/g'
git grep -l 'from langchain_community\.agents' | xargs sed -i '' 's/from langchain_community\.agents/from langchain.agents/g'
git grep -l 'from langchain_community\.memory' | xargs sed -i '' 's/from langchain_community\.memory/from langchain.memory/g'
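# the four rewrites above undo the blanket langchain_community substitution for
# text_splitter, chains, agents and memory, all of which stay in the langchain package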
git grep -l 'langchain\.__version__' | xargs sed -i '' 's/langchain\.__version__/langchain_community.__version__/g'
git grep -l 'langchain\.document_loaders' | xargs sed -i '' 's/langchain\.document_loaders/langchain_community.document_loaders/g'
git grep -l 'langchain\.callbacks' | xargs sed -i '' 's/langchain\.callbacks/langchain_community.callbacks/g'
git grep -l 'langchain\.tools' | xargs sed -i '' 's/langchain\.tools/langchain_community.tools/g'
git grep -l 'langchain\.llms' | xargs sed -i '' 's/langchain\.llms/langchain_community.llms/g'
git grep -l 'import langchain$' | xargs sed -i '' 's/import\ langchain$/import\ langchain_community/g'
git grep -l 'from\ langchain\ ' | xargs sed -i '' 's/from\ langchain\ /from\ langchain_community\ /g'
git grep -l 'langchain_core.language_models.llmsten' | xargs sed -i '' 's/langchain_core.language_models.llmsten/langchain_community.llms.baseten/g'

# update all moved langchain files to re-export classes and functions
cd ../langchain
git checkout master -- langchain

python ../../.scripts/community_split/update_imports.py langchain/chat_loaders langchain_community.chat_loaders
python ../../.scripts/community_split/update_imports.py langchain/callbacks langchain_community.callbacks
python ../../.scripts/community_split/update_imports.py langchain/document_loaders langchain_community.document_loaders
python ../../.scripts/community_split/update_imports.py langchain/docstore langchain_community.docstore
python ../../.scripts/community_split/update_imports.py langchain/document_transformers langchain_community.document_transformers
python ../../.scripts/community_split/update_imports.py langchain/embeddings langchain_community.embeddings
python ../../.scripts/community_split/update_imports.py langchain/graphs langchain_community.graphs
python ../../.scripts/community_split/update_imports.py langchain/llms langchain_community.llms
python ../../.scripts/community_split/update_imports.py langchain/chat_models langchain_community.chat_models
python ../../.scripts/community_split/update_imports.py langchain/memory/chat_message_histories langchain_community.chat_message_histories
python ../../.scripts/community_split/update_imports.py langchain/storage langchain_community.storage
python ../../.scripts/community_split/update_imports.py langchain/tools langchain_community.tools
python ../../.scripts/community_split/update_imports.py langchain/utilities langchain_community.utilities
python ../../.scripts/community_split/update_imports.py langchain/vectorstores langchain_community.vectorstores
python ../../.scripts/community_split/update_imports.py langchain/adapters langchain_community.adapters
python ../../.scripts/community_split/update_imports.py langchain/agents/agent_toolkits langchain_community.agent_toolkits
python ../../.scripts/community_split/update_imports.py langchain/cache.py langchain_community.cache
python ../../.scripts/community_split/update_imports.py langchain/utils/math.py langchain_community.utils.math
python ../../.scripts/community_split/update_imports.py langchain/utils/json_schema.py langchain_core.utils.json_schema
python ../../.scripts/community_split/update_imports.py langchain/utils/html.py langchain_core.utils.html
python ../../.scripts/community_split/update_imports.py langchain/utils/env.py langchain_core.utils.env
python ../../.scripts/community_split/update_imports.py langchain/utils/strings.py langchain_core.utils.strings
python ../../.scripts/community_split/update_imports.py langchain/utils/openai.py langchain_openai.utils
python ../../.scripts/community_split/update_imports.py langchain/utils/openai_functions.py langchain_openai.functions

# update core and openai imports
git grep -l 'from langchain.llms.base ' | xargs sed -i '' 's/from langchain.llms.base /from langchain_core.language_models.llms /g'
git grep -l 'from langchain.chat_models.base ' | xargs sed -i '' 's/from langchain.chat_models.base /from langchain_core.language_models.chat_models /g'
git grep -l 'from langchain.tools.base' | xargs sed -i '' 's/from langchain.tools.base/from langchain_core.tools/g'
git grep -l 'from langchain_community.llms.openai' | xargs sed -i '' 's/from langchain_community.llms.openai/from langchain_openai.llms/g'
git grep -l 'from langchain_community.chat_models.openai' | xargs sed -i '' 's/from langchain_community.chat_models.openai/from langchain_openai.chat_models/g'
git grep -l 'from langchain_community.chat_models.azure_openai' | xargs sed -i '' 's/from langchain_community.chat_models.azure_openai/from langchain_openai.chat_models/g'
git grep -l 'from langchain_community.embeddings.openai' | xargs sed -i '' 's/from langchain_community.embeddings.openai/from langchain_openai.embeddings/g'
git grep -l 'from langchain_community.embeddings.azure_openai' | xargs sed -i '' 's/from langchain_community.embeddings.azure_openai/from langchain_openai.embeddings/g'
git grep -l 'from langchain_community.adapters.openai' | xargs sed -i '' 's/from langchain_community.adapters.openai/from langchain_openai.adapters/g'

git grep -l 'langchain_core.language_models.llmsten' | xargs sed -i '' 's/langchain_core.language_models.llmsten/langchain_community.llms.baseten/g'

cd ..

mv community/langchain_community/utilities/loading.py langchain/langchain/utilities
mv community/langchain_community/utilities/asyncio.py langchain/langchain/utilities

# move from community to openai
mv community/langchain_community/chat_models/openai.py partners/openai/langchain_openai/chat_models/base.py
mv community/langchain_community/chat_models/azure_openai.py partners/openai/langchain_openai/chat_models/azure.py
mv community/langchain_community/llms/openai.py partners/openai/langchain_openai/llms/base.py
mv community/langchain_community/embeddings/openai.py partners/openai/langchain_openai/embeddings/base.py
mv community/langchain_community/embeddings/azure_openai.py partners/openai/langchain_openai/embeddings/azure.py
mv community/langchain_community/adapters/openai.py partners/openai/langchain_openai/adapters.py

# move tests from community to openai
mv community/tests/unit_tests/chat_models/test_openai.py partners/openai/tests/unit_tests/chat_models/test_base.py
|
||||
mv community/tests/integration_tests/chat_models/test_openai.py partners/openai/tests/integration_tests/chat_models/test_base.py
|
||||
mv community/tests/unit_tests/chat_models/test_azureopenai.py partners/openai/tests/unit_tests/chat_models/test_azure.py
|
||||
mv community/tests/integration_tests/chat_models/test_azure_openai.py partners/openai/tests/integration_tests/chat_models/test_azure.py
|
||||
mv community/tests/unit_tests/llms/test_openai.py partners/openai/tests/unit_tests/llms/test_base.py
|
||||
mv community/tests/integration_tests/llms/test_openai.py partners/openai/tests/integration_tests/llms/test_base.py
|
||||
mv community/tests/integration_tests/llms/test_azure_openai.py partners/openai/tests/integration_tests/llms/test_azure.py
|
||||
mv community/tests/unit_tests/embeddings/test_openai.py partners/openai/tests/unit_tests/embeddings/test_base.py
|
||||
mv community/tests/integration_tests/embeddings/test_openai.py partners/openai/tests/integration_tests/embeddings/test_base.py
|
||||
mv community/tests/integration_tests/embeddings/test_azure_openai.py partners/openai/tests/integration_tests/embeddings/test_azure.py
|
||||
|
||||
mkdir -p partners/openai/tests/integration_tests/adapters
|
||||
mv community/tests/integration_tests/adapters/{__init__,test_openai}.py partners/openai/tests/integration_tests/adapters
|
||||
|
||||
git add partners core
|
||||
|
||||
# rm files from community that just export core classes
|
||||
rm community/langchain_community/{chat_models,llms,tools,embeddings,vectorstores,callbacks}/base.py
|
||||
rm community/tests/unit_tests/{chat_models,llms,tools,callbacks}/test_base.py
|
||||
rm community/tests/unit_tests/callbacks/test_manager.py
|
||||
rm community/langchain_community/callbacks/{stdout,streaming_stdout}.py
|
||||
rm community/langchain_community/callbacks/tracers/{base,evaluation,langchain,langchain_v1,log_stream,root_listeners,run_collector,schemas,stdout}.py
|
||||
|
||||
# keep export tests in langchain
|
||||
git checkout master -- langchain/tests/unit_tests/{chat_models,llms,tools,callbacks,document_loaders}/test_base.py
|
||||
git checkout master -- langchain/tests/unit_tests/{callbacks,docstore,document_loaders,document_transformers,embeddings,graphs,llms,chat_models,storage,tools,utilities,vectorstores}/test_imports.py
|
||||
git checkout master -- langchain/tests/unit_tests/callbacks/test_manager.py
|
||||
git checkout master -- langchain/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py
|
||||
git checkout master -- langchain/tests/unit_tests/document_loaders/parsers/test_public_api.py
|
||||
git checkout master -- langchain/tests/unit_tests/vectorstores/test_public_api.py
|
||||
git checkout master -- langchain/tests/unit_tests/schema
|
||||
touch langchain/tests/unit_tests/{llms,chat_models,tools,callbacks,runnables,document_loaders,docstore,document_transformers,embeddings,graphs,storage,utilities,vectorstores}/__init__.py
|
||||
touch langchain/tests/unit_tests/document_loaders/{blob_loaders,parsers}/__init__.py
|
||||
|
||||
# cp lint scripts
|
||||
cp -r core/scripts community
|
||||
cp -r core/scripts partners/openai
|
||||
|
||||
# circular imports
|
||||
sed -i '' 's/from\ langchain_openai.chat_models\ /from\ langchain_openai.chat_models.base\ /g' partners/openai/langchain_openai/chat_models/azure.py
|
||||
sed -i '' 's/from\ langchain_openai.embeddings\ /from\ langchain_openai.embeddings.base\ /g' partners/openai/langchain_openai/embeddings/azure.py
|
||||
|
||||
# rm requires marker in openai
|
||||
git grep -l '@pytest\.mark\.requires' partners/openai | xargs sed -i '' 's/@pytest\.mark\.requires("openai")//g'
|
||||
|
||||
# cp test helpers
|
||||
cp -r langchain/tests/integration_tests/examples community/tests
|
||||
cp -r langchain/tests/integration_tests/examples community/tests/integration_tests
|
||||
cp -r langchain/tests/unit_tests/examples community/tests/unit_tests
|
||||
cp langchain/tests/unit_tests/conftest.py community/tests/unit_tests
|
||||
cp langchain/tests/integration_tests/test_compile.py community/tests/integration_tests
|
||||
cp langchain/tests/integration_tests/test_compile.py partners/openai/tests/integration_tests
|
||||
cp community/tests/integration_tests/vectorstores/fake_embeddings.py langchain/tests/integration_tests/cache/

# cp manually changed files
cp -r ../.scripts/community_split/libs/* .

# mv some tests to integrations
mv community/tests/{unit_tests,integration_tests}/document_loaders/test_telegram.py
mv community/tests/{unit_tests,integration_tests}/document_loaders/parsers/test_docai.py
mv community/tests/{unit_tests,integration_tests}/chat_message_histories/test_streamlit.py
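# (note) brace expansion gives each mv above exactly two arguments, e.g.
#   mv community/tests/unit_tests/document_loaders/test_telegram.py \
#      community/tests/integration_tests/document_loaders/test_telegram.py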

# fix some final tests
git grep -l 'integration_tests\.vectorstores\.fake_embeddings' community/tests | xargs sed -i '' 's/integration_tests\.vectorstores\.fake_embeddings/integration_tests.cache.fake_embeddings/g'
sed -i '' 's/llms\.loading\.get_type_to_cls_dict/llms.get_type_to_cls_dict/g' langchain/tests/unit_tests/chains/test_llm.py

# format
cd core
make format
cd ../langchain
make format
cd ../experimental
make format
cd ../community
make format
cd ../partners/openai
make format

cd ../..
sed -E -i '' '1 s/(.*)/\1\ \ \#\ noqa\:\ E501/g' langchain/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py
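# (note) the sed above appends "  # noqa: E501" to line 1 of openai_functions.py,
# silencing the line-length lint error on that line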
touch community/langchain_community/agent_toolkits/amadeus/__init__.py

@@ -1,6 +1,7 @@
 import ast
 import os
 import sys
+from pathlib import Path


 class ImportTransformer(ast.NodeTransformer):
@@ -33,19 +34,26 @@ def find_public_classes_and_methods(file_path):

     public_items = []
     for item in node.body:
-        if (
-            isinstance(item, ast.ClassDef) or isinstance(item, ast.FunctionDef)
-        ) and not item.name.startswith("_"):
+        if isinstance(item, ast.ClassDef) or isinstance(item, ast.FunctionDef):
             public_items.append(item.name)
+        if (
+            isinstance(item, ast.Assign)
+            and hasattr(item.targets[0], "id")
+            and item.targets[0].id not in ("__all__", "logger")
+        ):
+            public_items.append(item.targets[0].id)

-    return public_items
+    return public_items or None


 def process_file(file_path, module_name):
     public_items = find_public_classes_and_methods(file_path)
+    if public_items is None:
+        return
+
     with open(file_path, "r") as file:
-        tree = ast.parse(file.read(), filename=file_path)
+        contents = file.read()
+        tree = ast.parse(contents, filename=file_path)

     tree = ImportTransformer(public_items, module_name).visit(tree)
     tree = ast.fix_missing_locations(tree)
@@ -55,13 +63,16 @@ def process_file(file_path, module_name):


 def process_directory(directory_path, base_module_name):
-    for root, dirs, files in os.walk(directory_path):
-        for filename in files:
-            if filename.endswith(".py") and not filename.startswith("_"):
-                file_path = os.path.join(root, filename)
-                relative_path = os.path.relpath(file_path, directory_path)
-                module_name = f"{base_module_name}.{os.path.splitext(relative_path)[0].replace(os.sep, '.')}"
-                process_file(file_path, module_name)
+    if Path(directory_path).is_file():
+        process_file(directory_path, base_module_name)
+    else:
+        for root, dirs, files in os.walk(directory_path):
+            for filename in files:
+                if filename.endswith(".py") and not filename.startswith("_"):
+                    file_path = os.path.join(root, filename)
+                    relative_path = os.path.relpath(file_path, directory_path)
+                    module_name = f"{base_module_name}.{os.path.splitext(relative_path)[0].replace(os.sep, '.')}"
+                    process_file(file_path, module_name)


 if __name__ == "__main__":
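
For context on the `return public_items or None` change above: an empty list is falsy, so `or None` coerces it to `None`, and the new early return in `process_file` then skips modules that export nothing. A minimal standalone sketch of the collection logic as it reads after this diff (the sample module source is hypothetical, not from the repo):

import ast

# hypothetical module that exports nothing public
source = '''
logger = None
__all__ = []
'''

tree = ast.parse(source)
public_items = []
for item in tree.body:
    # classes and functions are collected by name
    if isinstance(item, ast.ClassDef) or isinstance(item, ast.FunctionDef):
        public_items.append(item.name)
    # top-level assignments are collected too, except __all__ and logger
    if (
        isinstance(item, ast.Assign)
        and hasattr(item.targets[0], "id")
        and item.targets[0].id not in ("__all__", "logger")
    ):
        public_items.append(item.targets[0].id)

# [] or None evaluates to None, so process_file would skip this module
print(public_items or None)  # prints: None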

Makefile
@@ -41,7 +41,7 @@ spell_fix:
 # LINTING AND FORMATTING
 ######################

-lint:
+lint lint_package lint_tests:
 	poetry run ruff docs templates cookbook
 	poetry run ruff format docs templates cookbook --diff
 	poetry run ruff --select I docs templates cookbook
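For reference, `lint lint_package lint_tests:` declares three Make targets that share a single recipe, so `make lint`, `make lint_package`, and `make lint_tests` all run the same commands; presumably this keeps the root Makefile answering to the same target names as the per-package Makefiles.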

@@ -104,3 +104,7 @@ Please see [here](https://python.langchain.com) for full documentation, which in
 As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.

 For detailed information on how to contribute, see [here](.github/CONTRIBUTING.md).
+
+## 🌟 Contributors
+
+[](https://github.com/langchain-ai/langchain/graphs/contributors)
Some files were not shown because too many files have changed in this diff.