Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-06 17:20:16 +00:00)

Compare commits: langchain= ... jacob/curr (50 commits)
Commit SHA1s in this range:

080f287a2c, 36919b19b6, 55a6347478, 6ef6c9e7f1, b1ac3925f7, 244cd5c141, 412bc82c11, fc3353636a, 7130aa826f, 367b2d8dbe,
432ccd686d, c4417ea93c, 7a62d3dbd6, 2428984205, ea3cd1ebba, 3e454d7568, 08638ccc88, ee3fe20af4, 1e7d8ba9a6, 16e178a8c2,
5fc5ef2b52, 9bcf8f867d, 092e9ee0e6, 10d8c3cbfa, 555c6d3c20, dc131ac42a, 14a8bbc21a, 1de1182a9f, 71c2221f8c, 6ea6f9f7bc,
975b6129f6, b63a48b7d3, 9de562f747, 141943a7e1, 6928f4c438, 14dd89a1ee, c4e149d4f1, 9c6efadec3, 91b37b2d81, 1e1fd30def,
66265aaac4, 8dac0fb3f1, 68fee3e44b, 13855ef0c3, 34a02efcf9, 859e434932, 160fc7f246, 73966e693c, 007c5a85d5, e80c150c44
.github/scripts/check_diff.py (66 changes)
@@ -53,6 +53,44 @@ def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
     return list(updated)
 
 
+def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
+    min_python = "3.8"
+    max_python = "3.12"
+
+    # custom logic for specific directories
+    if dir_ == "libs/partners/milvus":
+        # milvus poetry doesn't allow 3.12 because they
+        # declare deps in funny way
+        max_python = "3.11"
+
+    return [
+        {"working-directory": dir_, "python-version": min_python},
+        {"working-directory": dir_, "python-version": max_python},
+    ]
+
+
+def _get_configs_for_multi_dirs(
+    job: str, dirs_to_run: List[str], dependents: dict
+) -> List[Dict[str, str]]:
+    if job == "lint":
+        dirs = add_dependents(
+            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
+            dependents,
+        )
+    elif job in ["test", "compile-integration-tests", "dependencies"]:
+        dirs = add_dependents(
+            dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
+        )
+    elif job == "extended-tests":
+        dirs = list(dirs_to_run["extended-test"])
+    else:
+        raise ValueError(f"Unknown job: {job}")
+
+    return [
+        config for dir_ in dirs for config in _get_configs_for_single_dir(job, dir_)
+    ]
+
+
 if __name__ == "__main__":
     files = sys.argv[1:]
 
@@ -126,17 +164,23 @@ if __name__ == "__main__":
 
     dependents = dependents_graph()
 
-    outputs = {
-        "dirs-to-lint": add_dependents(
-            dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
-            dependents,
-        ),
-        "dirs-to-test": add_dependents(
-            dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
-        ),
-        "dirs-to-extended-test": list(dirs_to_run["extended-test"]),
-        "docs-edited": "true" if docs_edited else "",
+    # we now have dirs_by_job
+    # todo: clean this up
+    map_job_to_configs = {
+        job: _get_configs_for_multi_dirs(job, dirs_to_run, dependents)
+        for job in [
+            "lint",
+            "test",
+            "extended-tests",
+            "compile-integration-tests",
+            "dependencies",
+        ]
     }
-    for key, value in outputs.items():
+    map_job_to_configs["test-doc-imports"] = (
+        [{"python-version": "3.12"}] if docs_edited else []
+    )
+
+    for key, value in map_job_to_configs.items():
         json_output = json.dumps(value)
         print(f"{key}={json_output}")
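To make the new helpers concrete, here is a minimal runnable sketch of what they emit. `_get_configs_for_single_dir` is copied from the diff above; the directory list and job name are made-up sample inputs, not values from an actual CI run:

```python
import json
from typing import Dict, List


def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
    # Copied from the diff above: each directory runs on the min and max
    # supported Python versions only.
    min_python = "3.8"
    max_python = "3.12"
    if dir_ == "libs/partners/milvus":
        max_python = "3.11"  # milvus poetry doesn't allow 3.12
    return [
        {"working-directory": dir_, "python-version": min_python},
        {"working-directory": dir_, "python-version": max_python},
    ]


# Sample inputs (illustrative only):
dirs = ["libs/core", "libs/partners/milvus"]
configs = [c for d in dirs for c in _get_configs_for_single_dir("test", d)]
print(f"test={json.dumps(configs)}")
# test=[{"working-directory": "libs/core", "python-version": "3.8"},
#       {"working-directory": "libs/core", "python-version": "3.12"},
#       {"working-directory": "libs/partners/milvus", "python-version": "3.8"},
#       {"working-directory": "libs/partners/milvus", "python-version": "3.11"}]
```

Each `key=json` line is appended to `$GITHUB_OUTPUT` by the calling workflow, which is what the check_diffs.yml changes below consume.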
.github/workflows/_compile_integration_test.yml (10 changes)
@@ -7,6 +7,10 @@ on:
       required: true
       type: string
       description: "From which folder this pipeline executes"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -25,14 +29,14 @@ jobs:
         - "3.10"
         - "3.11"
         - "3.12"
-    name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
+    name: "poetry run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: ${{ inputs.working-directory }}
           cache-key: compile-integration
.github/workflows/_dependencies.yml (18 changes)
@@ -11,6 +11,10 @@ on:
       required: false
       type: string
       description: "Relative path to the langchain library folder"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -21,22 +25,14 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
-    name: dependency checks ${{ matrix.python-version }}
+    name: dependency checks ${{ inputs.python-version }}
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: ${{ inputs.working-directory }}
           cache-key: pydantic-cross-compat
.github/workflows/_integration_test.yml (15 changes)
@@ -6,6 +6,10 @@ on:
     working-directory:
       required: true
       type: string
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -16,19 +20,14 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.11"
-    name: Python ${{ matrix.python-version }}
+    name: Python ${{ inputs.python-version }}
     steps:
      - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: ${{ inputs.working-directory }}
           cache-key: core
.github/workflows/_lint.yml (26 changes)
@@ -11,6 +11,10 @@ on:
       required: false
       type: string
       description: "Relative path to the langchain library folder"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -21,27 +25,15 @@ env:
 
 jobs:
   build:
-    name: "make lint #${{ matrix.python-version }}"
+    name: "make lint #${{ inputs.python-version }}"
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        # Only lint on the min and max supported Python versions.
-        # It's extremely unlikely that there's a lint issue on any version in between
-        # that doesn't show up on the min or max versions.
-        #
-        # GitHub rate-limits how many jobs can be running at any one time.
-        # Starting new jobs is also relatively slow,
-        # so linting on fewer versions makes CI faster.
-        python-version:
-          - "3.8"
-          - "3.12"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: ${{ inputs.working-directory }}
           cache-key: lint-with-extras
@@ -86,7 +78,7 @@ jobs:
         with:
           path: |
             ${{ env.WORKDIR }}/.mypy_cache
-          key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
+          key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
 
 
       - name: Analysing the code with our lint
@@ -120,7 +112,7 @@ jobs:
         with:
           path: |
             ${{ env.WORKDIR }}/.mypy_cache_test
-          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
+          key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
 
       - name: Analysing the code with our lint
         working-directory: ${{ inputs.working-directory }}
.github/workflows/_release.yml (1 change)
@@ -122,7 +122,6 @@ jobs:
           fi
           {
             echo 'release-body<<EOF'
             echo "# Release $TAG"
             echo $PREAMBLE
             echo
             git log --format="%s" "$PREV_TAG"..HEAD -- $WORKING_DIR
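For context, `release-body<<EOF ... EOF` above is GitHub Actions' delimiter syntax for writing a multi-line value to a step output. A rough Python sketch of what the shell block does (the sample body text is invented for illustration; the real one is assembled from `$TAG`, `$PREAMBLE`, and `git log`):

```python
import os

# Invented sample body; real content comes from the shell block above.
body = "Release notes line 1\nRelease notes line 2"

# Multi-line output values must be wrapped in name<<DELIMITER ... DELIMITER
# when appended to the $GITHUB_OUTPUT file.
with open(os.environ["GITHUB_OUTPUT"], "a") as f:
    f.write(f"release-body<<EOF\n{body}\nEOF\n")
```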
.github/workflows/_test.yml (18 changes)
@@ -11,6 +11,10 @@ on:
       required: false
       type: string
       description: "Relative path to the langchain library folder"
+    python-version:
+      required: true
+      type: string
+      description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -21,22 +25,14 @@ jobs:
       run:
         working-directory: ${{ inputs.working-directory }}
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
-    name: "make test #${{ matrix.python-version }}"
+    name: "make test #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           working-directory: ${{ inputs.working-directory }}
           cache-key: core
.github/workflows/_test_doc_imports.yml (11 changes)
@@ -2,6 +2,11 @@ name: test_doc_imports
 
 on:
   workflow_call:
+    inputs:
+      python-version:
+        required: true
+        type: string
+        description: "Python version to use"
 
 env:
   POETRY_VERSION: "1.7.1"
@@ -13,14 +18,14 @@ jobs:
       matrix:
         python-version:
           - "3.12"
-    name: "check doc imports #${{ matrix.python-version }}"
+    name: "check doc imports #${{ inputs.python-version }}"
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ inputs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
           cache-key: core
 
.github/workflows/check_diffs.yml (83 changes)
@@ -33,91 +33,96 @@ jobs:
       run: |
         python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
     outputs:
-      dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
-      dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
-      dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
-      docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
+      lint: ${{ steps.set-matrix.outputs.lint }}
+      test: ${{ steps.set-matrix.outputs.test }}
+      extended-tests: ${{ steps.set-matrix.outputs.extended-tests }}
+      compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
+      dependencies: ${{ steps.set-matrix.outputs.dependencies }}
+      test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
   lint:
-    name: cd ${{ matrix.working-directory }}
+    name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-lint != '[]' }}
+    if: ${{ needs.build.outputs.lint != '[]' }}
     strategy:
       matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-lint) }}
+        job-configs: ${{ fromJson(needs.build.outputs.lint) }}
     uses: ./.github/workflows/_lint.yml
     with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   test:
-    name: cd ${{ matrix.working-directory }}
+    name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
+    if: ${{ needs.build.outputs.test != '[]' }}
     strategy:
      matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
+        job-configs: ${{ fromJson(needs.build.outputs.test) }}
     uses: ./.github/workflows/_test.yml
     with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   test-doc-imports:
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
-    uses: ./.github/workflows/_test_doc_imports.yml
-    secrets: inherit
-
-  compile-integration-tests:
-    name: cd ${{ matrix.working-directory }}
-    needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
+    if: ${{ needs.build.outputs.test-doc-imports != '[]' }}
     strategy:
       matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
+        job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
+    uses: ./.github/workflows/_test_doc_imports.yml
+    secrets: inherit
+    with:
+      python-version: ${{ matrix.job-configs.python-version }}
+
+  compile-integration-tests:
+    name: cd ${{ matrix.job-configs.working-directory }}
+    needs: [ build ]
+    if: ${{ needs.build.outputs.compile-integration-tests != '[]' }}
+    strategy:
+      matrix:
+        job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
     uses: ./.github/workflows/_compile_integration_test.yml
     with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   dependencies:
-    name: cd ${{ matrix.working-directory }}
+    name: cd ${{ matrix.job-configs.working-directory }}
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
+    if: ${{ needs.build.outputs.dependencies != '[]' }}
     strategy:
       matrix:
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
+        job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
     uses: ./.github/workflows/_dependencies.yml
     with:
-      working-directory: ${{ matrix.working-directory }}
+      working-directory: ${{ matrix.job-configs.working-directory }}
+      python-version: ${{ matrix.job-configs.python-version }}
     secrets: inherit
 
   extended-tests:
-    name: "cd ${{ matrix.working-directory }} / make extended_tests #${{ matrix.python-version }}"
+    name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
     needs: [ build ]
-    if: ${{ needs.build.outputs.dirs-to-extended-test != '[]' }}
+    if: ${{ needs.build.outputs.extended-tests != '[]' }}
     strategy:
       matrix:
-        # note different variable for extended test dirs
-        working-directory: ${{ fromJson(needs.build.outputs.dirs-to-extended-test) }}
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
-          - "3.11"
-          - "3.12"
+        job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
     runs-on: ubuntu-latest
     defaults:
       run:
-        working-directory: ${{ matrix.working-directory }}
+        working-directory: ${{ matrix.job-configs.working-directory }}
     steps:
       - uses: actions/checkout@v4
 
-      - name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
+      - name: Set up Python ${{ matrix.job-configs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
         uses: "./.github/actions/poetry_setup"
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ matrix.job-configs.python-version }}
           poetry-version: ${{ env.POETRY_VERSION }}
-          working-directory: ${{ matrix.working-directory }}
+          working-directory: ${{ matrix.job-configs.working-directory }}
           cache-key: extended
 
       - name: Install dependencies
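This workflow is the consuming side of the check_diff.py refactor: each job's `fromJson(needs.build.outputs.<job>)` turns the JSON list printed by the script into one matrix entry per config dict. A minimal sketch of that expansion (the JSON string here is an invented example of the build job's output, not a captured value):

```python
import json

# Invented example of what `needs.build.outputs.test` could contain:
output = (
    '[{"working-directory": "libs/core", "python-version": "3.8"},'
    ' {"working-directory": "libs/core", "python-version": "3.12"}]'
)

# GitHub Actions spawns one matrix job per list element; `matrix.job-configs`
# is the whole dict, hence `matrix.job-configs.working-directory` above.
for job_configs in json.loads(output):
    wd = job_configs["working-directory"]
    py = job_configs["python-version"]
    print(f"cd {wd}  # Python {py}")
```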
@@ -1022,7 +1022,7 @@ See our [blog post overview](https://blog.langchain.dev/query-construction/) and
 
 #### Indexing
 
-Fouth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
+Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
 
 Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.
 
@@ -1130,7 +1130,7 @@ Table columns:
 | Token | [many classes](/docs/how_to/split_by_token/) | Tokens | | Splits text on tokens. There exist a few different ways to measure tokens. |
 | Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. |
 | Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
-| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
+| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
 
 ### Evaluation
 <span data-heading-keywords="evaluation,evaluate"></span>
 
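As a quick illustration of the character-based row in the table above, here is a minimal sketch using `CharacterTextSplitter`; the sample text, separator, and sizes are arbitrary choices for demonstration:

```python
from langchain_text_splitters import CharacterTextSplitter

text = "First paragraph.\n\nSecond paragraph.\n\nThird paragraph."

# Split on a user-defined character sequence (here the blank line
# between paragraphs), packing pieces into chunks of up to 40 chars.
splitter = CharacterTextSplitter(separator="\n\n", chunk_size=40, chunk_overlap=0)
for chunk in splitter.split_text(text):
    print(repr(chunk))
```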
@@ -11,7 +11,7 @@ There are a few different places you can contribute integrations for LangChain:
 - **Community**: For lighter-weight integrations that are primarily maintained by LangChain and the Open Source Community.
 - **Partner Packages**: For independent packages that are co-maintained by LangChain and a partner.
 
-For the most part, new integrations should be added to the Community package. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
+For the most part, **new integrations should be added to the Community package**. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
 
 In the following sections, we'll walk through how to contribute to each of these packages from a fake company, `Parrot Link AI`.
 
@@ -60,6 +60,10 @@ And add documentation to:
 
 ## Partner package in LangChain repo
 
+:::caution
+Before starting a **partner** package, please confirm your intent with the LangChain team. Partner packages require more maintenance as separate packages, so we will close PRs that add new partner packages without prior discussion. See the above section for how to add a community integration.
+:::
+
 Partner packages can be hosted in the `LangChain` monorepo or in an external repo.
 
 Partner package in the `LangChain` repo is placed in `libs/partners/{partner}`
 
@@ -153,7 +153,7 @@
     "\n",
     "#### OpenAI\n",
     "\n",
-    "For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={\"include_usage\": True}`.\n",
+    "For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
     "\n",
     "```{=mdx}\n",
     ":::note\n",
@@ -172,18 +172,18 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
-      "content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
+      "content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content='Hello' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content='!' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content=' How' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content=' can' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content=' I' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content=' assist' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
+      "content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
      ]
     }
    ],
@@ -191,7 +191,7 @@
     "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "\n",
     "aggregate = None\n",
-    "for chunk in llm.stream(\"hello\", stream_options={\"include_usage\": True}):\n",
+    "for chunk in llm.stream(\"hello\", stream_usage=True):\n",
     "    print(chunk)\n",
     "    aggregate = chunk if aggregate is None else aggregate + chunk"
    ]
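Since summed chunks merge their `usage_metadata`, the totals can be read straight off the final aggregate. A short self-contained sketch of that follow-up step, built only from the cell above (output values echo the documented example):

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125")

aggregate = None
for chunk in llm.stream("hello", stream_usage=True):
    # Summing chunks concatenates content and adds up usage_metadata.
    aggregate = chunk if aggregate is None else aggregate + chunk

# With stream_usage=True the final aggregate carries the usage totals.
print(aggregate.usage_metadata)
# e.g. {'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}
```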
@@ -229,7 +229,7 @@
    "id": "7dba63e8-0ed7-4533-8f0f-78e19c38a25c",
    "metadata": {},
    "source": [
-    "To disable streaming token counts for OpenAI, set `\"include_usage\"` to False in `stream_options`, or omit it from the parameters:"
+    "To disable streaming token counts for OpenAI, set `stream_usage` to False, or omit it from the parameters:"
    ]
   },
   {
@@ -242,17 +242,17 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
-      "content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n"
+      "content='' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content='Hello' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content='!' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content=' How' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content=' can' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content=' I' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content=' assist' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
+      "content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
      ]
     }
    ],
@@ -267,7 +267,7 @@
    "id": "6a5d9617-be3a-419a-9276-de9c29fa50ae",
    "metadata": {},
    "source": [
-    "You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
+    "You can also enable streaming token usage by setting `stream_usage` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
    "\n",
    "See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps."
   ]
@@ -275,7 +275,7 @@
  {
   "cell_type": "code",
   "execution_count": 8,
-  "id": "57dec1fb-bd9c-4c98-8798-8fbbe67f6b2c",
+  "id": "0b1523d8-127e-4314-82fa-bd97aca37f9a",
   "metadata": {},
   "outputs": [
   {
@@ -301,7 +301,7 @@
    "\n",
    "llm = ChatOpenAI(\n",
    "    model=\"gpt-3.5-turbo-0125\",\n",
-    "    model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
+    "    stream_usage=True,\n",
    ")\n",
    "# Under the hood, .with_structured_output binds tools to the\n",
    "# chat model and appends a parser.\n",
@@ -341,7 +341,7 @@
  {
   "cell_type": "code",
   "execution_count": 9,
-  "id": "31667d54",
+  "id": "b04a4486-72fd-48ce-8f9e-5d281b441195",
   "metadata": {},
   "outputs": [
   {
@@ -361,7 +361,11 @@
    "\n",
    "from langchain_community.callbacks.manager import get_openai_callback\n",
    "\n",
-    "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
+    "llm = ChatOpenAI(\n",
+    "    model=\"gpt-3.5-turbo-0125\",\n",
+    "    temperature=0,\n",
+    "    stream_usage=True,\n",
+    ")\n",
    "\n",
    "with get_openai_callback() as cb:\n",
    "    result = llm.invoke(\"Tell me a joke\")\n",
@@ -379,14 +383,14 @@
  {
   "cell_type": "code",
   "execution_count": 10,
-  "id": "e09420f4",
+  "id": "05f22a1d-b021-490f-8840-f628a07459f2",
   "metadata": {},
   "outputs": [
   {
    "name": "stdout",
    "output_type": "stream",
    "text": [
-     "55\n"
+     "54\n"
    ]
   }
  ],
@@ -397,37 +401,29 @@
    "    print(cb.total_tokens)"
   ]
  },
- {
-  "cell_type": "markdown",
-  "id": "9ac51188-c8f4-4230-90fd-3cd78cdd955d",
-  "metadata": {},
-  "source": [
-   "```{=mdx}\n",
-   ":::note\n",
-   "Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available:\n",
-   ":::\n",
-   "```"
-  ]
- },
  {
   "cell_type": "code",
   "execution_count": 11,
-  "id": "b241069a-265d-4497-af34-b0a5f95ae67f",
+  "id": "c00c9158-7bb4-4279-88e6-ea70f46e6ac2",
   "metadata": {},
   "outputs": [
   {
    "name": "stdout",
    "output_type": "stream",
    "text": [
-     "28\n"
+     "Tokens Used: 27\n",
+     "\tPrompt Tokens: 11\n",
+     "\tCompletion Tokens: 16\n",
+     "Successful Requests: 1\n",
+     "Total Cost (USD): $2.95e-05\n"
    ]
   }
  ],
  "source": [
   "with get_openai_callback() as cb:\n",
-   "    for chunk in llm.stream(\"Tell me a joke\", stream_options={\"include_usage\": True}):\n",
+   "    for chunk in llm.stream(\"Tell me a joke\"):\n",
   "        pass\n",
-   "    print(cb.total_tokens)"
+   "    print(cb)"
  ]
 },
 {
@@ -457,21 +453,7 @@
     ")\n",
     "tools = load_tools([\"wikipedia\"])\n",
     "agent = create_tool_calling_agent(llm, tools, prompt)\n",
-    "agent_executor = AgentExecutor(\n",
-    "    agent=agent, tools=tools, verbose=True, stream_runnable=False\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "9c1ae74d-8300-4041-9ff4-66093ee592b1",
-   "metadata": {},
-   "source": [
-    "```{=mdx}\n",
-    ":::note\n",
-    "We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events.\n",
-    ":::\n",
-    "```"
+    "agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
    ]
   },
   {
@@ -503,36 +485,30 @@
     "\n",
     "\n",
     "\n",
-    "Page: Anna's hummingbird\n",
-    "Summary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.\n",
-    "It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.\n",
-    "These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
+    "Page: Allen's hummingbird\n",
+    "Summary: Allen's hummingbird (Selasphorus sasin) is a species of hummingbird that breeds in the western United States. It is one of seven species in the genus Selasphorus.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
     "Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
     "\n",
     "\n",
     "\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
     "Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
     "\n",
     "\n",
     "\n",
     "Page: Fastest animals\n",
     "Summary: This is a list of the fastest animals in the world, by types of animal.\n",
     "\n",
     "\n",
     "\n",
     "Page: Falcon\n",
     "Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
     "Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
     "The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
     "The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
     "Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
-    "As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.\u001b[0m\n",
+    "As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species in level flight is the common swift, which holds the record for the fastest confirmed level flight by a bird at 111.5 km/h (69.3 mph). The peregrine falcon is known to exceed speeds of 320 km/h (200 mph) in its dives, making it the fastest bird in terms of diving speed.\u001b[0m\n",
     "\n",
     "\u001b[1m> Finished chain.\u001b[0m\n",
-    "Total Tokens: 1787\n",
-    "Prompt Tokens: 1687\n",
-    "Completion Tokens: 100\n",
-    "Total Cost (USD): $0.0009935\n"
+    "Total Tokens: 1675\n",
+    "Prompt Tokens: 1538\n",
+    "Completion Tokens: 137\n",
+    "Total Cost (USD): $0.0009745000000000001\n"
    ]
   }
  ],
docs/docs/how_to/convert_runnable_to_tool.ipynb (new file, 541 lines)
@@ -0,0 +1,541 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to use Runnables as Tools\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"\n",
|
||||
"- [Runnables](/docs/concepts#runnable-interface)\n",
|
||||
"- [Tools](/docs/concepts#tools)\n",
|
||||
"- [Agents](/docs/tutorials/agents)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Here we will demonstrate how to convert a LangChain `Runnable` into a tool that can be used by agents, chains, or chat models.\n",
|
||||
"\n",
|
||||
"## Dependencies\n",
|
||||
"\n",
|
||||
"**Note**: this guide requires `langchain-core` >= 0.2.13. We will also use [OpenAI](/docs/integrations/platforms/openai/) for embeddings, but any LangChain embeddings should suffice. We will use a simple [LangGraph](https://langchain-ai.github.io/langgraph/) agent for demonstration purposes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "92341f48-2c29-4ce9-8ab8-0a7c7a7c98a1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%capture --no-stderr\n",
|
||||
"%pip install -U langchain-core langchain-openai langgraph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2b0dcc1a-48e8-4a81-b920-3563192ce076",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"LangChain [tools](/docs/concepts#tools) are interfaces that an agent, chain, or chat model can use to interact with the world. See [here](/docs/how_to/#tools) for how-to guides covering tool-calling, built-in tools, custom tools, and more information.\n",
|
||||
"\n",
|
||||
"LangChain tools-- instances of [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
|
||||
"\n",
|
||||
"- Their inputs are constrained to be serializable, specifically strings and Python `dict` objects;\n",
|
||||
"- They contain names and descriptions indicating how and when they should be used;\n",
|
||||
"- They may contain a detailed [args_schema](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
|
||||
"\n",
|
||||
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b4d76680-1b6b-4862-8c4f-22766a1d41f2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Basic usage\n",
|
||||
"\n",
|
||||
"With typed `dict` input:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b2cc4231-64a3-4733-a284-932dcbf2fcc3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from typing_extensions import TypedDict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class Args(TypedDict):\n",
|
||||
" a: int\n",
|
||||
" b: List[int]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def f(x: Args) -> str:\n",
|
||||
" return str(x[\"a\"] * max(x[\"b\"]))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(f)\n",
|
||||
"as_tool = runnable.as_tool(\n",
|
||||
" name=\"My tool\",\n",
|
||||
" description=\"Explanation of when to use tool.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "57f2d435-624d-459a-903d-8509fbbde610",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Explanation of when to use tool.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'My tool',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'a': {'title': 'A', 'type': 'integer'},\n",
|
||||
" 'b': {'title': 'B', 'type': 'array', 'items': {'type': 'integer'}}},\n",
|
||||
" 'required': ['a', 'b']}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(as_tool.description)\n",
|
||||
"\n",
|
||||
"as_tool.args_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "54ae7384-a03d-4fa4-8cdf-9604a4bc39ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'6'"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"as_tool.invoke({\"a\": 3, \"b\": [1, 2]})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9038f587-4613-4f50-b349-135f9e7e3b15",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Without typing information, arg types can be specified via `arg_types`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "169f733c-4936-497f-8577-ee769dc16b88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Any, Dict\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def g(x: Dict[str, Any]) -> str:\n",
|
||||
" return str(x[\"a\"] * max(x[\"b\"]))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(g)\n",
|
||||
"as_tool = runnable.as_tool(\n",
|
||||
" name=\"My tool\",\n",
|
||||
" description=\"Explanation of when to use tool.\",\n",
|
||||
" arg_types={\"a\": int, \"b\": List[int]},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Alternatively, we can add typing information via [Runnable.with_types](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_types):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "eb102705-89b7-48dc-9158-d36d5f98ae8e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"as_tool = runnable.with_types(input_type=Args).as_tool(\n",
|
||||
" name=\"My tool\",\n",
|
||||
" description=\"Explanation of when to use tool.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7c474d85-4e01-4fae-9bba-0c6c8c26475c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"String input is also supported:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "c475282a-58d6-4c2b-af7d-99b73b7d8a13",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def f(x: str) -> str:\n",
|
||||
" return x + \"a\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def g(x: str) -> str:\n",
|
||||
" return x + \"z\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"runnable = RunnableLambda(f) | g\n",
|
||||
"as_tool = runnable.as_tool()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "ad6d8d96-3a87-40bd-a2ac-44a8acde0a8e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'baz'"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"as_tool.invoke(\"b\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "89fdb3a7-d228-48f0-8f73-262af4febb58",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## In agents\n",
|
||||
"\n",
|
||||
"Below we will incorporate LangChain Runnables as tools in an [agent](/docs/concepts/#agents) application. We will demonstrate with:\n",
|
||||
"\n",
|
||||
"- a document [retriever](/docs/concepts/#retrievers);\n",
|
||||
"- a simple [RAG](/docs/tutorials/rag/) chain, allowing an agent to delegate relevant queries to it.\n",
|
||||
"\n",
|
||||
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"<ChatModelTabs\n",
|
||||
" customVarName=\"llm\"\n",
|
||||
"/>\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "d06c9f2a-4475-450f-9106-54db1d99623b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "e8a2038a-d762-4196-b5e3-fdb89c11e71d",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Following the [RAG tutorial](/docs/tutorials/rag/), let's first construct a retriever:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "23d2a47e-6712-4294-81c8-2c1d76b4bb81",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_core.vectorstores import InMemoryVectorStore\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"documents = [\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Dogs are great companions, known for their loyalty and friendliness.\",\n",
|
||||
" ),\n",
|
||||
" Document(\n",
|
||||
" page_content=\"Cats are independent pets that often enjoy their own space.\",\n",
|
||||
" ),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"vectorstore = InMemoryVectorStore.from_documents(\n",
|
||||
" documents, embedding=OpenAIEmbeddings()\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"retriever = vectorstore.as_retriever(\n",
|
||||
" search_type=\"similarity\",\n",
|
||||
" search_kwargs={\"k\": 1},\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/v0.2/docs/tutorials/agents/) and provide it the tool:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"tools = [\n",
|
||||
" retriever.as_tool(\n",
|
||||
" name=\"pet_info_retriever\",\n",
|
||||
" description=\"Get information about pets.\",\n",
|
||||
" )\n",
|
||||
"]\n",
|
||||
"agent = create_react_agent(llm, tools)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "be29437b-a187-4a0a-9a5d-419c56f2434e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
|
||||
"----\n",
|
||||
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"for chunk in agent.stream({\"messages\": [(\"human\", \"What are dogs known for?\")]}):\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"----\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [LangSmith trace](https://smith.langchain.com/public/44e438e3-2faf-45bd-b397-5510fc145eb9/r) for the above run."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a722fd8a-b957-4ba7-b408-35596b76835f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Going further, we can create a simple [RAG](/docs/tutorials/rag/) chain that takes an additional parameter-- here, the \"style\" of the answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "bea518c9-c711-47c2-b8cc-dbd102f71f09",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"\n",
|
||||
"system_prompt = \"\"\"\n",
|
||||
"You are an assistant for question-answering tasks.\n",
|
||||
"Use the below context to answer the question. If\n",
|
||||
"you don't know the answer, say you don't know.\n",
|
||||
"Use three sentences maximum and keep the answer\n",
|
||||
"concise.\n",
|
||||
"\n",
|
||||
"Answer in the style of {answer_style}.\n",
|
||||
"\n",
|
||||
"Question: {question}\n",
|
||||
"\n",
|
||||
"Context: {context}\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages([(\"system\", system_prompt)])\n",
|
||||
"\n",
|
||||
"rag_chain = (\n",
|
||||
" {\n",
|
||||
" \"context\": itemgetter(\"question\") | retriever,\n",
|
||||
" \"question\": itemgetter(\"question\"),\n",
|
||||
" \"answer_style\": itemgetter(\"answer_style\"),\n",
|
||||
" }\n",
|
||||
" | prompt\n",
|
||||
" | llm\n",
|
||||
" | StrOutputParser()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "955a23db-5218-4c34-8486-450a2ddb3443",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that the input schema for our chain contains the required arguments, so it converts to a tool without further specification:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "2c9f6e61-80ed-4abb-8e77-84de3ccbc891",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'RunnableParallel<context,question,answer_style>Input',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {'question': {'title': 'Question'},\n",
|
||||
" 'answer_style': {'title': 'Answer Style'}}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"rag_chain.input_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "a3f9cf5b-8c71-4b0f-902b-f92e028780c9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"rag_tool = rag_chain.as_tool(\n",
|
||||
" name=\"pet_expert\",\n",
|
||||
" description=\"Get information about pets.\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4570615b-8f96-4d97-ae01-1c08b14be584",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Below we again invoke the agent. Note that the agent populates the required parameters in its `tool_calls`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "06409913-a2ad-400f-a202-7b8dd2ef483a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
|
||||
"----\n",
|
||||
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
|
||||
"----\n",
|
||||
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
|
||||
"----\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent = create_react_agent(llm, [rag_tool])\n",
|
||||
"\n",
|
||||
"for chunk in agent.stream(\n",
|
||||
" {\"messages\": [(\"human\", \"What would a pirate say dogs are known for?\")]}\n",
|
||||
"):\n",
|
||||
" print(chunk)\n",
|
||||
" print(\"----\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "96cc9bc3-e79e-49a8-9915-428ea225358b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"See [LangSmith trace](https://smith.langchain.com/public/147ae4e6-4dfb-4dd9-8ca0-5c5b954f08ac/r) for the above run."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
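As an aside, the converted `rag_tool` can also be invoked directly, outside of the agent loop. A minimal sketch (not part of the notebook above; the question and style values are illustrative):

```python
# Direct invocation of the Runnable-backed tool. Both fields are
# required by the inferred input schema shown earlier.
result = rag_tool.invoke(
    {"question": "What are dogs known for?", "answer_style": "a pirate"}
)
print(result)
```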
|
||||
@@ -58,6 +58,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
|
||||
@@ -86,7 +88,7 @@
|
||||
" # NOTE: This is returning another Runnable, not an actual output.\n",
|
||||
" return contextualize_question\n",
|
||||
" else:\n",
|
||||
" return RunnablePassthrough()\n",
|
||||
" return RunnablePassthrough() | itemgetter(\"question\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@chain\n",
|
||||
|
||||
@@ -85,7 +85,7 @@ These are the core building blocks you can use when building applications.
|
||||
- [How to: stream tool calls](/docs/how_to/tool_streaming)
|
||||
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
|
||||
- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
|
||||
- [How to: force specific tool call](/docs/how_to/tool_choice)
|
||||
- [How to: force a specific tool call](/docs/how_to/tool_choice)
|
||||
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
|
||||
|
||||
### Messages
|
||||
@@ -187,6 +187,7 @@ LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to p
|
||||
|
||||
- [How to: create custom tools](/docs/how_to/custom_tools)
|
||||
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
|
||||
- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
|
||||
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
|
||||
- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
|
||||
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
|
||||
@@ -194,6 +195,7 @@ LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to p
|
||||
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
|
||||
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
|
||||
- [How to: disable parallel tool calling](/docs/how_to/tool_choice)
|
||||
- [How to: stream events from within a tool](/docs/how_to/tool_stream_events)
|
||||
|
||||
### Multimodal
|
||||
|
||||
|
||||
@@ -6,6 +6,14 @@
|
||||
"source": [
|
||||
"# How to force tool calling behavior\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"In order to force our LLM to spelect a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:"
|
||||
]
|
||||
},
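The diff elides the model and tool definitions, but the forced binding itself looks roughly like this (a hedged sketch: the `multiply` tool is illustrative, `llm` is assumed to be a chat model defined earlier, and exact `tool_choice` values vary by provider; many accept the tool's name or `"any"`):

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


# Force the model to call the "multiply" tool on every turn by naming it
# in tool_choice when binding.
llm_forced_to_multiply = llm.bind_tools([multiply], tool_choice="multiply")
```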
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to create tools](/docs/how_to/custom_tools)\n",
|
||||
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling)\n",
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
":::{.callout-info} Supported models\n",
|
||||
@@ -28,21 +28,17 @@
|
||||
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
|
||||
"There are times where tools need to use runtime values that should not be populated by the LLM. For example, the tool logic may require using the ID of the user who made the request. In this case, allowing the LLM to control the parameter is a security risk.\n",
|
||||
"\n",
|
||||
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
|
||||
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic. These defined parameters should not be part of the tool's final schema.\n",
|
||||
"\n",
|
||||
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
|
||||
"\n",
|
||||
"This how-to guide shows a simple design pattern that creates the tool dynamically at run time and binds to them appropriate values."
|
||||
"This how-to guide shows some design patterns that create the tool dynamically at run time and binds appropriate values to them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can bind them to chat models as follows:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
@@ -55,25 +51,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
|
||||
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
|
||||
"Note: you may need to restart the kernel to use updated packages.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_openai\n",
|
||||
"%pip install -qU langchain_core langchain_openai\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
@@ -90,10 +75,17 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Passing request time information\n",
|
||||
"## Using the `curry` utility function\n",
|
||||
"\n",
|
||||
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
|
||||
"this information may be the user ID as resolved from the request itself."
|
||||
":::caution Compatibility\n",
|
||||
"\n",
|
||||
"This function is only available in `langchain_core>=0.2.17`.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"We can bind arguments to the tool's inner function via a utility wrapper. This will use a technique called [currying](https://en.wikipedia.org/wiki/Currying) to bind arguments to the function while also removing it from the function signature.\n",
|
||||
"\n",
|
||||
"Below, we initialize a tool that lists a user's favorite pet. It requires a `user_id` that we'll curry ahead of time."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -102,18 +94,98 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"from langchain_core.tools import StructuredTool\n",
|
||||
"from langchain_core.utils.curry import curry\n",
|
||||
"\n",
|
||||
"from langchain_core.output_parsers import JsonOutputParser\n",
|
||||
"from langchain_core.tools import BaseTool, tool"
|
||||
"user_to_pets = {\"eugene\": [\"cats\"]}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def list_favorite_pets(user_id: str) -> None:\n",
|
||||
" \"\"\"List favorite pets, if any.\"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"curried_function = curry(list_favorite_pets, user_id=\"eugene\")\n",
|
||||
"\n",
|
||||
"curried_tool = StructuredTool.from_function(curried_function)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If we examine the schema of the curried tool, we can see that it no longer has `user_id` as part of its signature:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'title': 'list_favorite_petsSchema',\n",
|
||||
" 'description': 'List favorite pets, if any.',\n",
|
||||
" 'type': 'object',\n",
|
||||
" 'properties': {}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"curried_tool.input_schema.schema()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"But if we invoke it, we can see that it returns Eugene's favorite pets, `cats`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"['cats']"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"curried_tool.invoke({})"
|
||||
]
|
||||
},
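For comparison, a similar effect is possible with only the standard library; this sketch uses `functools.partial` (an illustration of the same currying idea, not the `curry` helper from the diff):

```python
from functools import partial

user_to_pets = {"eugene": ["cats"]}


def list_favorite_pets(user_id: str) -> list:
    """List favorite pets, if any."""
    return user_to_pets.get(user_id, [])


# Bind user_id ahead of time so callers no longer need to supply it.
list_eugenes_pets = partial(list_favorite_pets, user_id="eugene")
print(list_eugenes_pets())  # ['cats']
```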
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using scope\n",
|
||||
"\n",
|
||||
"We can achieve a similar result by wrapping the tool declarations themselves in a function. This lets us take advantage of the closure created by the wrapper to pass a variable into each tool. Here's an example:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import List\n",
|
||||
"\n",
|
||||
"from langchain_core.tools import BaseTool, tool\n",
|
||||
"\n",
|
||||
"user_to_pets = {}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@@ -133,7 +205,7 @@
|
||||
"\n",
|
||||
" @tool\n",
|
||||
" def list_favorite_pets() -> None:\n",
|
||||
" \"\"\"List favorite pets if any.\"\"\"\n",
|
||||
" \"\"\"List favorite pets, if any.\"\"\"\n",
|
||||
" return user_to_pets.get(user_id, [])\n",
|
||||
"\n",
|
||||
" return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
|
||||
@@ -143,12 +215,12 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Verify that the tools work correctly"
|
||||
"Verify that the tools work correctly:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -169,21 +241,14 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def handle_run_time_request(user_id: str, query: str):\n",
|
||||
" \"\"\"Handle run time request.\"\"\"\n",
|
||||
" tools = generate_tools_for_user(user_id)\n",
|
||||
" llm_with_tools = llm.bind_tools(tools)\n",
|
||||
" prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [(\"system\", \"You are a helpful assistant.\")],\n",
|
||||
" )\n",
|
||||
" chain = prompt | llm_with_tools\n",
|
||||
" return llm_with_tools.invoke(query)"
|
||||
]
|
||||
},
|
||||
@@ -196,7 +261,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@@ -204,10 +269,10 @@
|
||||
"text/plain": [
|
||||
"[{'name': 'update_favorite_pets',\n",
|
||||
" 'args': {'pets': ['cats', 'parrots']},\n",
|
||||
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
|
||||
" 'id': 'call_c8agYHY1COFSAgwZR11OGCmQ'}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@@ -248,7 +313,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
286
docs/docs/how_to/tool_stream_events.ipynb
Normal file
@@ -0,0 +1,286 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# How to stream events from within a tool\n",
|
||||
"\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [Using stream events](/docs/how_to/streaming/#using-stream-events)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"If you have tools that call LLMs, retrievers, or other runnables, you may want to access internal events from those runnables. This guide shows you a few ways you can do this using the `astream_events()` method.\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"LangChain cannot automatically propagate callbacks to child runnables if you are running async code in python<=3.10.\n",
|
||||
" \n",
|
||||
"This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"We'll define a custom tool below that calls a chain that summarizes its input in a special way by prompting an LLM to return only 10 words, then reversing the output:\n",
|
||||
"\n",
|
||||
"```{=mdx}\n",
|
||||
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
|
||||
"\n",
|
||||
"<ChatModelTabs customVarName=\"model\" />\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# | output: false\n",
|
||||
"# | echo: false\n",
|
||||
"\n",
|
||||
"%pip install -qU langchain langchain_anthropic\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from getpass import getpass\n",
|
||||
"\n",
|
||||
"from langchain_anthropic import ChatAnthropic\n",
|
||||
"\n",
|
||||
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
|
||||
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
|
||||
"\n",
|
||||
"model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def special_summarization_tool(long_text: str) -> str:\n",
|
||||
" \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
|
||||
" prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" def reverse(x: str):\n",
|
||||
" return x[::-1]\n",
|
||||
"\n",
|
||||
" chain = prompt | model | StrOutputParser() | reverse\n",
|
||||
" summary = chain.invoke({\"long_text\": long_text})\n",
|
||||
" return summary"
|
||||
]
|
||||
},
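If you are on Python 3.10 or earlier, one way to make these internal events visible is to accept a `RunnableConfig` parameter in the tool and forward it explicitly. A hedged sketch of that variant (not part of this notebook; it reuses the `model` defined above):

```python
from langchain_core.runnables import RunnableConfig


@tool
def special_summarization_tool_with_config(long_text: str, config: RunnableConfig) -> str:
    """A tool that summarizes input text using advanced techniques."""
    prompt = ChatPromptTemplate.from_template(
        "You are an expert writer. Summarize the following text in 10 words or less:\n\n{long_text}"
    )

    def reverse(x: str):
        return x[::-1]

    chain = prompt | model | StrOutputParser() | reverse
    # Forwarding the config propagates callbacks (and hence astream_events)
    # to the child chain even on older Python versions.
    return chain.invoke({"long_text": long_text}, config=config)
```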
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you just invoke the tool directly, you can see that you only get the final response:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'.yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"LONG_TEXT = \"\"\"\n",
|
||||
"NARRATOR:\n",
|
||||
"(Black screen with text; The sound of buzzing bees can be heard)\n",
|
||||
"According to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\n",
|
||||
"BARRY BENSON:\n",
|
||||
"(Barry is picking out a shirt)\n",
|
||||
"Yellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\n",
|
||||
"JANET BENSON:\n",
|
||||
"Barry! Breakfast is ready!\n",
|
||||
"BARRY:\n",
|
||||
"Coming! Hang on a second.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"special_summarization_tool.invoke({\"long_text\": LONG_TEXT})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If you wanted to access the raw output from the chat model, you could use the [`astream_events()`](/docs/how_to/streaming/#using-stream-events) method and look for `on_chat_model_end` events:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chat_model_end', 'data': {'output': AIMessage(content='Bee defies physics; Barry chooses outfit for graduation day.', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-195c0986-2ffa-43a3-9366-f2f96c42fe57', usage_metadata={'input_tokens': 182, 'output_tokens': 16, 'total_tokens': 198}), 'input': {'messages': [[HumanMessage(content=\"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n\\nNARRATOR:\\n(Black screen with text; The sound of buzzing bees can be heard)\\nAccording to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\\nBARRY BENSON:\\n(Barry is picking out a shirt)\\nYellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\\nJANET BENSON:\\nBarry! Breakfast is ready!\\nBARRY:\\nComing! Hang on a second.\\n\")]]}}, 'run_id': '195c0986-2ffa-43a3-9366-f2f96c42fe57', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['370919df-1bc3-43ae-aab2-8e112a4ddf47', 'de535624-278b-4927-9393-6d0cac3248df']}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"stream = special_summarization_tool.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for event in stream:\n",
|
||||
" if event[\"event\"] == \"on_chat_model_end\":\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And you can see that you get the raw response from the chat model.\n",
|
||||
"\n",
|
||||
"`astream_events()` will automatically call internal runnables in a chain with streaming enabled if possible, so if you wanted to a stream of tokens as they are generated from the chat model, you could simply filter our calls to look for `on_chat_model_stream` events with no other changes:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3')}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': 'cd8c1bd9-64d8-463c-a4d7-4bceed7911b3', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['8ddd1325-07c4-4213-8a2f-4462db8c6c70', '9f8654b4-b3f6-414e-b41d-dd201342a2fa']}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"stream = special_summarization_tool.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for event in stream:\n",
|
||||
" if event[\"event\"] == \"on_chat_model_stream\":\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Note that you still have access to the final tool response as well. You can access it by looking for an `on_tool_end` event.\n",
|
||||
"\n",
|
||||
"To make events your tool emits easier to identify, you can also add identifiers to runnables using the `with_config()` method. `run_name` will apply to only to the runnable you attach it to, while `tags` will be inherited by runnables called within your initial runnable.\n",
|
||||
"\n",
|
||||
"Let's redeclare the tool with a tag, then run it with `astream_events()` with some filters. You should only see streamed events from the chat model and the final tool output:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630')}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-696f4fc8-6c6f-46a0-8c82-e2e3f7625630', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': '696f4fc8-6c6f-46a0-8c82-e2e3f7625630', 'name': 'ChatAnthropic', 'tags': ['seq:step:2', 'bee_movie'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['49d9d7d3-2b02-4964-a6c5-12f57a063146', '8922d0e3-4199-4ba5-9a7a-fc4f2fca3e72']}\n",
|
||||
"{'event': 'on_tool_end', 'data': {'output': '.yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB'}, 'run_id': '49d9d7d3-2b02-4964-a6c5-12f57a063146', 'name': 'special_summarization_tool', 'tags': ['bee_movie'], 'metadata': {}, 'parent_ids': []}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tagged_tool = special_summarization_tool.with_config({\"tags\": [\"bee_movie\"]})\n",
|
||||
"\n",
|
||||
"stream = tagged_tool.astream_events(\n",
|
||||
" {\"long_text\": LONG_TEXT}, version=\"v2\", include_tags=[\"bee_movie\"]\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"async for event in stream:\n",
|
||||
" event_type = event[\"event\"]\n",
|
||||
" if event_type == \"on_chat_model_stream\" or event_type == \"on_tool_end\":\n",
|
||||
" print(event)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"Now you've learned how to stream events from within a tool. Next, you can learn more about how to use tools:\n",
|
||||
"\n",
|
||||
"- Bind [model-specific tools](/docs/how_to/tools_model_specific/)\n",
|
||||
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
|
||||
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
|
||||
"\n",
|
||||
"You can also check out some more specific uses of tool calling:\n",
|
||||
"\n",
|
||||
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
|
||||
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
@@ -7,9 +7,18 @@
|
||||
"source": [
|
||||
"# How to handle tool errors\n",
|
||||
"\n",
|
||||
"Using a model to invoke a tool has some obvious potential failure modes. Firstly, the model needs to return a output that can be parsed at all. Secondly, the model needs to return tool arguments that are valid.\n",
|
||||
":::info Prerequisites\n",
|
||||
"\n",
|
||||
"We can build error handling into our chains to mitigate these failure modes."
|
||||
"This guide assumes familiarity with the following concepts:\n",
|
||||
"- [Chat models](/docs/concepts/#chat-models)\n",
|
||||
"- [LangChain Tools](/docs/concepts/#tools)\n",
|
||||
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"Calling tools with an LLM is generally more reliable than pure prompting, but it isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n",
|
||||
"\n",
|
||||
"This guide covers some ways to build error handling into your chains to mitigate these failure modes."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -42,7 +51,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "08785b6d-722d-4620-b6ec-36deb3842c69",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -72,7 +81,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 4,
|
||||
"id": "86258950-5e61-4340-81b9-84a5d26e8773",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -82,12 +91,14 @@
|
||||
"\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 5,
|
||||
"id": "1d20604e-c4d1-4d21-841b-23e4f61aec36",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
@@ -99,28 +110,13 @@
|
||||
"@tool\n",
|
||||
"def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:\n",
|
||||
" \"\"\"Do something complex with a complex tool.\"\"\"\n",
|
||||
" return int_arg * float_arg"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "553c2c13-28c8-4451-8a3a-6c31d52dc31d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
" return int_arg * float_arg\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm_with_tools = llm.bind_tools(\n",
|
||||
" [complex_tool],\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "802b2eca-9f79-4d6c-8257-85139ca5c752",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
")\n",
|
||||
"\n",
|
||||
"# Define chain\n",
|
||||
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool"
|
||||
]
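The chain above will raise if the model emits arguments that fail validation, as the traceback below shows. One mitigation this guide builds toward is wrapping the tool call in a try/except fallback; a sketch, assuming the `llm_with_tools` and `complex_tool` defined above:

```python
from langchain_core.runnables import RunnableLambda


def try_except_tool(tool_args: dict) -> str:
    try:
        return str(complex_tool.invoke(tool_args))
    except Exception as e:
        # Surface the failure as text instead of raising, so a caller
        # (or the model itself) can react to it.
        return (
            f"Calling tool with arguments:\n\n{tool_args}\n\n"
            f"raised the following error:\n\n{type(e)}: {e}"
        )


safe_chain = (
    llm_with_tools
    | (lambda msg: msg.tool_calls[0]["args"])
    | RunnableLambda(try_except_tool)
)
```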
|
||||
@@ -135,7 +131,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 6,
|
||||
"id": "d354664c-ac44-4967-a35f-8912b3ad9477",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@@ -146,14 +142,14 @@
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:241\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 237\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 238\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 239\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 240\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 241\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 242\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 243\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 244\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 245\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 246\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:387\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 387\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 388\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 389\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:378\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 364\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_tool_start(\n\u001b[1;32m 365\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdescription\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdescription},\n\u001b[1;32m 366\u001b[0m tool_input \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tool_input, \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mstr\u001b[39m(tool_input),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 375\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 376\u001b[0m )\n\u001b[1;32m 377\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 378\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 379\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 380\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 381\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 382\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 383\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 384\u001b[0m )\n",
|
||||
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:283\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 283\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 285\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 286\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 288\u001b[0m }\n\u001b[1;32m 289\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
|
||||
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:526\u001b[0m, in \u001b[0;36mBaseModel.parse_obj\u001b[0;34m(cls, obj)\u001b[0m\n\u001b[1;32m 524\u001b[0m exc \u001b[38;5;241m=\u001b[39m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m expected dict not \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mobj\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 525\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ValidationError([ErrorWrapper(exc, loc\u001b[38;5;241m=\u001b[39mROOT_KEY)], \u001b[38;5;28mcls\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m--> 526\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mobj\u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:341\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(__pydantic_self__, **data)\u001b[0m\n\u001b[1;32m 339\u001b[0m values, fields_set, validation_error \u001b[38;5;241m=\u001b[39m validate_model(__pydantic_self__\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m, data)\n\u001b[1;32m 340\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m validation_error:\n\u001b[0;32m--> 341\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m validation_error\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 343\u001b[0m object_setattr(__pydantic_self__, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__dict__\u001b[39m\u001b[38;5;124m'\u001b[39m, values)\n",
|
||||
"Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/runnables/base.py:2572\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 2570\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m step\u001b[38;5;241m.\u001b[39minvoke(\u001b[38;5;28minput\u001b[39m, config, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 2571\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2572\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2573\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2574\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:380\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 374\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 376\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 377\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 378\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 379\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 380\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 381\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 382\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 383\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 384\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 385\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 386\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 387\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 388\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 389\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:537\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 535\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 536\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 537\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 538\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 539\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:526\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 524\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 525\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(_set_config_context, child_config)\n\u001b[0;32m--> 526\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 527\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 528\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 529\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(\n\u001b[1;32m 530\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m context\u001b[38;5;241m.\u001b[39mrun(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 534\u001b[0m )\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:424\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 422\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 423\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 424\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 425\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 426\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 427\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 428\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 429\u001b[0m }\n\u001b[1;32m 430\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:526\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.parse_obj\u001b[0;34m()\u001b[0m\n",
|
||||
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
|
||||
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)"
|
||||
]
|
||||
}
|
||||
@@ -176,10 +172,26 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "8fedb550-683d-45ae-8876-ae7acb332019",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
"  field required (type=value_error.missing)\n"
]
}
],
"source": [
"from typing import Any\n",
"\n",
@@ -193,32 +205,8 @@
"    return f\"Calling tool with arguments:\\n\\n{tool_args}\\n\\nraised the following error:\\n\\n{type(e)}: {e}\"\n",
"\n",
"\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "71a2c98d-c0be-4c0a-bb3d-41ad4596526c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.v1.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
"  field required (type=value_error.missing)\n"
]
}
],
"source": [
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool\n",
"\n",
"print(\n",
"    chain.invoke(\n",
"        \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
@@ -238,7 +226,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 10,
"id": "02cc4223-35fa-4240-976a-012299ca703c",
"metadata": {},
"outputs": [
@@ -248,19 +236,22 @@
"10.5"
]
},
"execution_count": 17,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"better_model = ChatOpenAI(model=\"gpt-4-1106-preview\", temperature=0).bind_tools(\n",
"    [complex_tool], tool_choice=\"complex_tool\"\n",
")\n",
"\n",
"better_chain = better_model | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"chain_with_fallback = chain.with_fallbacks([better_chain])\n",
"\n",
"chain_with_fallback.invoke(\n",
"    \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
")"
@@ -271,7 +262,7 @@
"id": "412f8c4e-cc83-4d87-84a1-5ba2f8edb1e9",
"metadata": {},
"source": [
"Looking at the [Langsmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
"Looking at the [LangSmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
]
},
{
@@ -286,17 +277,13 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 11,
"id": "b5659956-9454-468a-9753-a3ff9052b8f5",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"from typing import Any\n",
"\n",
"from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class CustomToolException(Exception):\n",
@@ -336,7 +323,7 @@
"# affect the prompt at all, but gives us the option to insert an arbitrary list of Messages\n",
"# into the prompt if needed. We'll use this on retries to insert the error message.\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [(\"human\", \"{input}\"), MessagesPlaceholder(\"last_output\", optional=True)]\n",
"    [(\"human\", \"{input}\"), (\"placeholder\", \"{last_output}\")]\n",
")\n",
"chain = prompt | llm_with_tools | tool_custom_exception\n",
"\n",
@@ -348,7 +335,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 12,
"id": "4c45f5bd-cbb4-47d5-b4b6-aec50673c750",
"metadata": {},
"outputs": [
@@ -358,7 +345,7 @@
"10.5"
]
},
"execution_count": 14,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -378,6 +365,24 @@
"source": [
"And our chain succeeds! Looking at the [LangSmith trace](https://smith.langchain.com/public/c11e804c-e14f-4059-bd09-64766f999c14/r), we can see that indeed our initial chain still fails, and it's only on retrying that the chain succeeds."
]
},
{
"cell_type": "markdown",
"id": "6b97af9f",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Now you've seen some strategies for handling tool calling errors. Next, you can learn more about how to use tools:\n",
"\n",
"- Few shot prompting [with tools](/docs/how_to/tools_few_shot/)\n",
"- Stream [tool calls](/docs/how_to/tool_streaming/)\n",
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
@@ -396,7 +401,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.10.5"
}
},
"nbformat": 4,
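For context on the retry hunks above: the notebook raises a `CustomToolException` from the tool, then re-invokes the chain with the exception inserted into the optional `last_output` placeholder so the model can self-correct. A minimal sketch of that wiring is below; the helper name `exception_to_messages` and the retry prompt text are assumptions, not the notebook's exact code.

```python
# Minimal sketch of the retry-on-exception pattern, assuming `chain` and
# CustomToolException (carrying .tool_call and .exception) from the notebook.
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage


def exception_to_messages(inputs: dict) -> dict:
    exception = inputs.pop("exception")
    # Replay the failed tool call plus the error so the model can self-correct.
    inputs["last_output"] = [
        AIMessage(content="", tool_calls=[exception.tool_call]),
        ToolMessage(
            tool_call_id=exception.tool_call["id"],
            content=str(exception.exception),
        ),
        HumanMessage(
            content="The last tool call raised an exception. Try again with corrected arguments."
        ),
    ]
    return inputs


def invoke_with_retry(inputs: dict):
    try:
        return chain.invoke(inputs)
    except CustomToolException as e:
        # One retry with the error context spliced into the prompt.
        return chain.invoke(exception_to_messages({**inputs, "exception": e}))
```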
@@ -1,5 +1,19 @@
{
"cells": [
{
"cell_type": "raw",
"id": "bd931196",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "344fc5a3",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "a792e839",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "61c2629c",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "e329385c",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "3169f380",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "87552e5a",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "9a10cdcc",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

@@ -1,5 +1,15 @@
{
"cells": [
{
"cell_type": "raw",
"id": "41200199",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",
@@ -14,21 +14,13 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"%pip install --upgrade --quiet gpt4all > /dev/null"
"%pip install --upgrade --quiet langchain-community gpt4all"
]
},
{
@@ -47,9 +39,7 @@
},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain_community.llms import GPT4All\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate"
]
},
@@ -92,64 +82,79 @@
"\n",
"For more info, visit https://github.com/nomic-ai/gpt4all.\n",
"\n",
"---"
"---\n",
"\n",
"This integration does not yet support streaming in chunks via the [`.stream()`](https://python.langchain.com/v0.2/docs/how_to/streaming/) method. The below example uses a callback handler with `streaming=True`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"local_path = (\n",
"    \"./models/ggml-gpt4all-l13b-snoozy.bin\"  # replace with your desired local file path\n",
"    \"./models/Meta-Llama-3-8B-Instruct.Q4_0.gguf\"  # replace with your local file path\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Token: Justin\n",
"Token:  Bieber\n",
"Token:  was\n",
"Token:  born\n",
"Token:  on\n",
"Token:  March\n",
"Token: \n",
"Token: 1\n",
"Token: ,\n",
"Token: \n"
]
}
],
"source": [
"# Callbacks support token-wise streaming\n",
"callbacks = [StreamingStdOutCallbackHandler()]\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"\n",
"count = 0\n",
"\n",
"\n",
"class MyCustomHandler(BaseCallbackHandler):\n",
"    def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
"        global count\n",
"        if count < 10:\n",
"            print(f\"Token: {token}\")\n",
"            count += 1\n",
"\n",
"\n",
"# Verbose is required to pass to the callback manager\n",
"llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)\n",
"llm = GPT4All(model=local_path, callbacks=[MyCustomHandler()], streaming=True)\n",
"\n",
"# If you want to use a custom model add the backend parameter\n",
"# Check https://docs.gpt4all.io/gpt4all_python.html for supported backends\n",
"llm = GPT4All(model=local_path, backend=\"gptj\", callbacks=callbacks, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# llm = GPT4All(model=local_path, backend=\"gptj\", callbacks=callbacks, streaming=True)\n",
"\n",
"chain = prompt | llm\n",
"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
"\n",
"llm_chain.run(question)"
"# Streamed tokens will be logged/aggregated via the passed callback\n",
"res = chain.invoke({\"question\": question})"
]
},
{
"attachments": {},
"cell_type": "markdown",
"cell_type": "code",
"execution_count": null,
"metadata": {},
"source": [
"Justin Bieber was born on March 1, 1994. In 1994, The Cowboys won Super Bowl XXVIII."
]
"outputs": [],
"source": []
}
],
"metadata": {
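A hedged variation on the callback approach shown in this notebook diff: instead of printing each token, collect them into a list so the streamed output can be reassembled afterwards. This sketch assumes the same `local_path` variable and the public `BaseCallbackHandler` and `GPT4All` classes used above.

```python
# Collect streamed tokens via a callback instead of printing them.
from langchain_core.callbacks import BaseCallbackHandler
from langchain_community.llms import GPT4All


class TokenCollector(BaseCallbackHandler):
    def __init__(self) -> None:
        self.tokens: list[str] = []

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # Called once per generated token when streaming=True.
        self.tokens.append(token)


collector = TokenCollector()
llm = GPT4All(model=local_path, callbacks=[collector], streaming=True)
llm.invoke("Once upon a time, ")
print("".join(collector.tokens))
```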
@@ -143,6 +143,25 @@
"print(chain.invoke({\"question\": question}))"
]
},
{
"cell_type": "markdown",
"id": "5141dc4d",
"metadata": {},
"source": [
"Streaming response."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1819250-2db9-4143-b88a-12e92d4e2386",
"metadata": {},
"outputs": [],
"source": [
"for chunk in chain.stream(question):\n",
"    print(chunk, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "dbbc3a37",
@@ -245,7 +245,7 @@
"source": [
"### Streaming\n",
"\n",
"To get streaming of LLM output, you can create a Huggingface `TextIteratorStreamer` for `_forward_params`."
"You can use the `stream` method to get streaming LLM output."
]
},
{
@@ -255,24 +255,11 @@
"metadata": {},
"outputs": [],
"source": [
"from threading import Thread\n",
"generation_config = {\"skip_prompt\": True, \"pipeline_kwargs\": {\"max_new_tokens\": 100}}\n",
"chain = prompt | ov_llm.bind(**generation_config)\n",
"\n",
"from transformers import TextIteratorStreamer\n",
"\n",
"streamer = TextIteratorStreamer(\n",
"    ov_llm.pipeline.tokenizer,\n",
"    timeout=30.0,\n",
"    skip_prompt=True,\n",
"    skip_special_tokens=True,\n",
")\n",
"pipeline_kwargs = {\"pipeline_kwargs\": {\"streamer\": streamer, \"max_new_tokens\": 100}}\n",
"chain = prompt | ov_llm.bind(**pipeline_kwargs)\n",
"\n",
"t1 = Thread(target=chain.invoke, args=({\"question\": question},))\n",
"t1.start()\n",
"\n",
"for new_text in streamer:\n",
"    print(new_text, end=\"\", flush=True)"
"for chunk in chain.stream(question):\n",
"    print(chunk, end=\"\", flush=True)"
]
},
{
@@ -7,7 +7,7 @@ This page covers how to use the `GPT4All` wrapper within LangChain. The tutorial
- Install the Python package with `pip install gpt4all`
- Download a [GPT4All model](https://gpt4all.io/index.html) and place it in your desired directory

In this example, We are using `mistral-7b-openorca.Q4_0.gguf`(Best overall fast chat model):
In this example, we are using `mistral-7b-openorca.Q4_0.gguf`:

```bash
mkdir models
@@ -30,7 +30,7 @@ model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)
response = model.invoke("Once upon a time, ")
```

You can also customize the generation parameters, such as n_predict, temp, top_p, top_k, and others.
You can also customize the generation parameters, such as `n_predict`, `temp`, `top_p`, `top_k`, and others.

To stream the model's predictions, add in a CallbackManager.

@@ -45,11 +45,11 @@ callbacks = [StreamingStdOutCallbackHandler()]
model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8)

# Generate text. Tokens are streamed through the callback manager.
model("Once upon a time, ", callbacks=callbacks)
model.invoke("Once upon a time, ", callbacks=callbacks)
```

## Model File

You can find links to model file downloads in the [https://gpt4all.io/](https://gpt4all.io/index.html).
You can download model files from the GPT4All client. You can download the client from the [GPT4All](https://gpt4all.io/index.html) website.

For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/gpt4all)
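Since the page mentions customizing generation parameters without showing them, here is a hedged sketch of what that could look like. The parameter values are illustrative assumptions; consult the GPT4All integration reference for the authoritative list and defaults.

```python
# Sketch: tuning generation parameters on the GPT4All wrapper.
from langchain_community.llms import GPT4All

model = GPT4All(
    model="./models/mistral-7b-openorca.Q4_0.gguf",
    n_threads=8,
    n_predict=200,  # maximum number of tokens to generate
    temp=0.7,       # sampling temperature
    top_p=0.9,
    top_k=40,
)
print(model.invoke("Once upon a time, "))
```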
23 docs/ignore-step.sh (new executable file)
@@ -0,0 +1,23 @@
#!/bin/bash
# Vercel "ignored build step": exiting 1 proceeds with the build, exiting 0 skips it.

echo "VERCEL_ENV: $VERCEL_ENV"
echo "VERCEL_GIT_COMMIT_REF: $VERCEL_GIT_COMMIT_REF"

if [ "$VERCEL_ENV" == "production" ] || [ "$VERCEL_GIT_COMMIT_REF" == "master" ] || [ "$VERCEL_GIT_COMMIT_REF" == "v0.1" ]; then
  echo "✅ Production build - proceeding with build"
  exit 1;
else
  echo "Checking for changes in docs/ and templates/:"
  echo "---"
  git log -n 50 --pretty=format:"%s" -- . ../templates | grep -v '(#'
  if [ $? -eq 0 ]; then
    echo "---"
    echo "✅ Changes detected in docs/ or templates/ - proceeding with build"
    exit 1
  else
    echo "---"
    echo "🛑 No changes detected in docs/ or templates/ - ignoring build"
    exit 0
  fi
fi
@@ -23,6 +23,18 @@ The following table shows the feature support for all document loaders.

"""

DEPRECATED = [
    "AirbyteCDKLoader",
    "AirbyteGongLoader",
    "AirbyteHubspotLoader",
    "AirbyteJSONLoader",
    "AirbyteSalesforceLoader",
    "AirbyteShopifyLoader",
    "AirbyteStripeLoader",
    "AirbyteTypeformLoader",
    "AirbyteZendeskSupportLoader",
]


def get_document_loader_table() -> str:
    """Get the table of document loaders."""
@@ -55,7 +67,7 @@ def get_document_loader_table() -> str:
    title = ["Document Loader", "Description", "Lazy loading", "Native async support"]
    rows = [title, [":-"] * 2 + [":-:"] * (len(title) - 2)]
    for loader, feats in sorted(doc_loaders_feat_table.items()):
        if not feats:
        if not feats or loader in DEPRECATED:
            continue
        rows += [
            [loader, feats["description"]]
@@ -1,6 +1,7 @@
{
  "buildCommand": "yarn build",
  "outputDirectory": "build",
  "ignoreCommand": "bash ignore-step.sh",
  "trailingSlash": true,
  "rewrites": [
    {
@@ -46,7 +46,7 @@ lint_tests: MYPY_CACHE=.mypy_cache_test

lint lint_diff lint_package lint_tests:
	./scripts/check_pydantic.sh .
	./scripts/lint_imports.sh
	./scripts/lint_imports.sh .
	./scripts/check_pickle.sh .
	poetry run ruff check .
	[ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff
@@ -63,7 +63,7 @@ class FileManagementToolkit(BaseToolkit):
    selected_tools: Optional[List[str]] = None
    """If provided, only provide the selected tools. Defaults to all."""

    @root_validator
    @root_validator(pre=True)
    def validate_tools(cls, values: dict) -> dict:
        selected_tools = values.get("selected_tools") or []
        for tool_name in selected_tools:

@@ -74,7 +74,7 @@ class PlayWrightBrowserToolkit(BaseToolkit):
        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator
    @root_validator(pre=True)
    def validate_imports_and_browser_provided(cls, values: dict) -> dict:
        """Check that the arguments are valid."""
        lazy_import_playwright_browsers()
@@ -223,19 +223,23 @@ class OpenAICallbackHandler(BaseCallbackHandler):
                message = generation.message
                if isinstance(message, AIMessage):
                    usage_metadata = message.usage_metadata
                    response_metadata = message.response_metadata
                else:
                    usage_metadata = None
                    response_metadata = None
            except AttributeError:
                usage_metadata = None
                response_metadata = None
        else:
            usage_metadata = None
            response_metadata = None
        if usage_metadata:
            token_usage = {"total_tokens": usage_metadata["total_tokens"]}
            completion_tokens = usage_metadata["output_tokens"]
            prompt_tokens = usage_metadata["input_tokens"]
            if response.llm_output is None:
                # model name (and therefore cost) is unavailable in
                # streaming responses
            if response_model_name := (response_metadata or {}).get("model_name"):
                model_name = standardize_model_name(response_model_name)
            elif response.llm_output is None:
                model_name = ""
            else:
                model_name = standardize_model_name(
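The hunk above makes the OpenAI cost callback read `usage_metadata` and `response_metadata` from streamed `AIMessage`s, so token counts work for streaming runs too. A hedged usage sketch follows; `stream_usage=True` is an assumption about the `langchain_openai` version in use.

```python
# Sketch: counting tokens for a streamed run with the updated handler.
from langchain_community.callbacks import get_openai_callback
from langchain_openai import ChatOpenAI  # assumes langchain-openai is installed

llm = ChatOpenAI(model="gpt-3.5-turbo", stream_usage=True)
with get_openai_callback() as cb:
    for _ in llm.stream("hello"):
        pass
# With usage_metadata attached to the final chunk, totals are now populated.
print(cb.total_tokens, cb.total_cost)
```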
@@ -113,7 +113,33 @@ _warned_once_already = False


class SQLChatMessageHistory(BaseChatMessageHistory):
    """Chat message history stored in an SQL database."""
    """Chat message history stored in an SQL database.

    Example:
        .. code-block:: python

            from langchain_core.messages import HumanMessage

            from langchain_community.chat_message_histories import SQLChatMessageHistory

            # create sync sql message history by connection_string
            message_history = SQLChatMessageHistory(
                session_id='foo', connection_string='sqlite:///memory.db'
            )
            message_history.add_message(HumanMessage("hello"))
            message_history.messages

            # create async sql message history using aiosqlite
            # from sqlalchemy.ext.asyncio import create_async_engine
            #
            # async_engine = create_async_engine("sqlite+aiosqlite:///memory.db")
            # async_message_history = SQLChatMessageHistory(
            #     session_id='foo', connection=async_engine,
            # )
            # await async_message_history.aadd_message(HumanMessage("hello"))
            # await async_message_history.aget_messages()

    """

    @property
    @deprecated("0.2.2", removal="0.3.0", alternative="session_maker")
@@ -131,6 +157,21 @@ class SQLChatMessageHistory(BaseChatMessageHistory):
        engine_args: Optional[Dict[str, Any]] = None,
        async_mode: Optional[bool] = None,  # Use only if connection is a string
    ):
        """Initialize with a SQLChatMessageHistory instance.

        Args:
            session_id: Indicates the id of the same session.
            connection_string: String parameter configuration for connecting
                to the database.
            table_name: Table name used to save data.
            session_id_field_name: The name of field of `session_id`.
            custom_message_converter: Custom message converter for converting
                database data and `BaseMessage`
            connection: Database connection object, which can be a string containing
                connection configuration, Engine object or AsyncEngine object.
            engine_args: Additional configuration for creating database engines.
            async_mode: Whether it is an asynchronous connection.
        """
        assert not (
            connection_string and connection
        ), "connection_string and connection are mutually exclusive"
@@ -116,18 +116,133 @@ def _convert_delta_to_message_chunk(


class MiniMaxChat(BaseChatModel):
    """MiniMax large language models.
    """MiniMax chat model integration.

    To use, you should have the environment variable ``MINIMAX_API_KEY`` set with
    your API token, or pass it as a named parameter to the constructor.
    Setup:
        To use, you should have the environment variable ``MINIMAX_API_KEY`` set with
        your API KEY.

    Example:
        .. code-block:: bash

            export MINIMAX_API_KEY="your-api-key"

    Key init args — completion params:
        model: Optional[str]
            Name of MiniMax model to use.
        max_tokens: Optional[int]
            Max number of tokens to generate.
        temperature: Optional[float]
            Sampling temperature.
        top_p: Optional[float]
            Total probability mass of tokens to consider at each step.
        streaming: Optional[bool]
            Whether to stream the results or not.

    Key init args — client params:
        api_key: Optional[str]
            MiniMax API key. If not passed in will be read from env var MINIMAX_API_KEY.
        base_url: Optional[str]
            Base URL for API requests.

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        .. code-block:: python

            from langchain_community.chat_models import MiniMaxChat
            llm = MiniMaxChat(model="abab5-chat")

    """
            chat = MiniMaxChat(
                api_key=api_key,
                model='abab6.5-chat',
                # temperature=...,
                # other params...
            )

    Invoke:
        .. code-block:: python

            messages = [
                ("system", "你是一名专业的翻译家,可以将用户的中文翻译为英文。"),
                ("human", "我喜欢编程。"),
            ]
            chat.invoke(messages)

        .. code-block:: python

            AIMessage(
                content='I enjoy programming.',
                response_metadata={
                    'token_usage': {'total_tokens': 48},
                    'model_name': 'abab6.5-chat',
                    'finish_reason': 'stop'
                },
                id='run-42d62ba6-5dc1-4e16-98dc-f72708a4162d-0'
            )

    Stream:
        .. code-block:: python

            for chunk in chat.stream(messages):
                print(chunk)

        .. code-block:: python

            content='I' id='run-a5837c45-4aaa-4f64-9ab4-2679bbd55522'
            content=' enjoy programming.' response_metadata={'finish_reason': 'stop'} id='run-a5837c45-4aaa-4f64-9ab4-2679bbd55522'

        .. code-block:: python

            stream = chat.stream(messages)
            full = next(stream)
            for chunk in stream:
                full += chunk
            full

        .. code-block:: python

            AIMessageChunk(
                content='I enjoy programming.',
                response_metadata={'finish_reason': 'stop'},
                id='run-01aed0a0-61c4-4709-be22-c6d8b17155d6'
            )

    Async:
        .. code-block:: python

            await chat.ainvoke(messages)

            # stream
            # async for chunk in chat.astream(messages):
            #     print(chunk)

            # batch
            # await chat.abatch([messages])

        .. code-block:: python

            AIMessage(
                content='I enjoy programming.',
                response_metadata={
                    'token_usage': {'total_tokens': 48},
                    'model_name': 'abab6.5-chat',
                    'finish_reason': 'stop'
                },
                id='run-c263b6f1-1736-4ece-a895-055c26b3436f-0'
            )

    Response metadata
        .. code-block:: python

            ai_msg = chat.invoke(messages)
            ai_msg.response_metadata

        .. code-block:: python

            {'token_usage': {'total_tokens': 48},
             'model_name': 'abab6.5-chat',
             'finish_reason': 'stop'}

    """  # noqa: E501

    @property
    def _identifying_params(self) -> Dict[str, Any]:
@@ -81,7 +81,7 @@ class DocugamiLoader(BaseLoader, BaseModel):
    include_project_metadata_in_doc_metadata: bool = True
    """Set to True if you want to include the project metadata in the doc metadata."""

    @root_validator
    @root_validator(pre=True)
    def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either local file paths are given, or remote API docset ID.

@@ -33,7 +33,7 @@ class DropboxLoader(BaseLoader, BaseModel):
    recursive: bool = False
    """Flag to indicate whether to load files recursively from subfolders."""

    @root_validator
    @root_validator(pre=True)
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either folder_path or file_paths is set, but not both."""
        if (

@@ -53,7 +53,7 @@ class GoogleDriveLoader(BaseLoader, BaseModel):
    file_loader_kwargs: Dict["str", Any] = {}
    """The file loader kwargs to use."""

    @root_validator
    @root_validator(pre=True)
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either folder_id or document_ids is set, but not both."""
        if values.get("folder_id") and (

@@ -47,7 +47,7 @@ class GoogleApiClient:
    def __post_init__(self) -> None:
        self.creds = self._load_credentials()

    @root_validator
    @root_validator(pre=True)
    def validate_channel_or_videoIds_is_set(
        cls, values: Dict[str, Any]
    ) -> Dict[str, Any]:
@@ -388,7 +388,7 @@ class GoogleApiYoutubeLoader(BaseLoader):

        return build("youtube", "v3", credentials=creds)

    @root_validator
    @root_validator(pre=True)
    def validate_channel_or_videoIds_is_set(
        cls, values: Dict[str, Any]
    ) -> Dict[str, Any]:

@@ -54,8 +54,10 @@ class AscendEmbeddings(Embeddings, BaseModel):
        self.model.half()
        self.encode([f"warmup {i} times" for i in range(10)])

    @root_validator
    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        if "model_path" not in values:
            raise ValueError("model_path is required")
        if not os.access(values["model_path"], os.F_OK):
            raise FileNotFoundError(
                f"Unable to find valid model path in [{values['model_path']}]"
@@ -0,0 +1,17 @@
from langchain_community.graph_vectorstores.extractors.html_link_extractor import (
    HtmlInput,
    HtmlLinkExtractor,
)
from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)
from langchain_community.graph_vectorstores.extractors.link_extractor_adapter import (
    LinkExtractorAdapter,
)

__all__ = [
    "LinkExtractor",
    "LinkExtractorAdapter",
    "HtmlInput",
    "HtmlLinkExtractor",
]
@@ -0,0 +1,124 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional, Set, Union
from urllib.parse import urldefrag, urljoin, urlparse

from langchain_core.documents import Document
from langchain_core.graph_vectorstores import Link

from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)
from langchain_community.graph_vectorstores.extractors.link_extractor_adapter import (
    LinkExtractorAdapter,
)

if TYPE_CHECKING:
    from bs4 import BeautifulSoup
    from bs4.element import Tag


def _parse_url(link: Tag, page_url: str, drop_fragments: bool = True) -> Optional[str]:
    href = link.get("href")
    if href is None:
        return None
    url = urlparse(href)
    if url.scheme not in ["http", "https", ""]:
        return None

    # Join the HREF with the page_url to convert relative paths to absolute.
    url = str(urljoin(page_url, href))

    # Fragments would be useful if we chunked a page based on section.
    # Then, each chunk would have a different URL based on the fragment.
    # Since we aren't doing that yet, they just "break" links. So, drop
    # the fragment.
    if drop_fragments:
        return urldefrag(url).url
    return url


def _parse_hrefs(
    soup: BeautifulSoup, url: str, drop_fragments: bool = True
) -> Set[str]:
    soup_links: List[Tag] = soup.find_all("a")
    links: Set[str] = set()

    for link in soup_links:
        parse_url = _parse_url(link, page_url=url, drop_fragments=drop_fragments)
        # Remove self links and entries for any 'a' tag that failed to parse
        # (didn't have href, or invalid domain, etc.)
        if parse_url and parse_url != url:
            links.add(parse_url)

    return links


@dataclass
class HtmlInput:
    content: Union[str, BeautifulSoup]
    base_url: str


class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
    def __init__(self, *, kind: str = "hyperlink", drop_fragments: bool = True):
        """Extract hyperlinks from HTML content.

        Expects the input to be an HTML string or a `BeautifulSoup` object.

        Args:
            kind: The kind of edge to extract. Defaults to "hyperlink".
            drop_fragments: Whether fragments in URLs and links should be
                dropped. Defaults to `True`.
        """
        try:
            import bs4  # noqa:F401
        except ImportError as e:
            raise ImportError(
                "BeautifulSoup4 is required for HtmlLinkExtractor. "
                "Please install it with `pip install beautifulsoup4`."
            ) from e

        self._kind = kind
        self.drop_fragments = drop_fragments

    def as_document_extractor(
        self, url_metadata_key: str = "source"
    ) -> LinkExtractor[Document]:
        """Return a LinkExtractor that applies to documents.

        NOTE: Since the HtmlLinkExtractor parses HTML, if you use it with other similar
        link extractors it may be more efficient to call the link extractors directly
        on the parsed BeautifulSoup object.

        Args:
            url_metadata_key: The name of the field in document metadata with the URL of
                the document.
        """
        return LinkExtractorAdapter(
            underlying=self,
            transform=lambda doc: HtmlInput(
                doc.page_content, doc.metadata[url_metadata_key]
            ),
        )

    def extract_one(
        self,
        input: HtmlInput,  # noqa: A002
    ) -> Set[Link]:
        content = input.content
        if isinstance(content, str):
            from bs4 import BeautifulSoup

            content = BeautifulSoup(content, "html.parser")

        base_url = input.base_url
        if self.drop_fragments:
            base_url = urldefrag(base_url).url

        hrefs = _parse_hrefs(content, base_url, self.drop_fragments)

        links = {Link.outgoing(kind=self._kind, tag=url) for url in hrefs}
        links.add(Link.incoming(kind=self._kind, tag=base_url))
        return links
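A minimal usage sketch for the extractor added above. It assumes `beautifulsoup4` is installed; the HTML string and base URL are illustrative.

```python
# Extract outgoing hyperlinks (plus the page's own incoming link) from HTML.
from langchain_community.graph_vectorstores.extractors import (
    HtmlInput,
    HtmlLinkExtractor,
)

html = '<html><body><a href="/docs">Docs</a><a href="#top">Top</a></body></html>'
extractor = HtmlLinkExtractor()
links = extractor.extract_one(HtmlInput(html, base_url="https://example.com/"))
# The "#top" self-link is dropped because drop_fragments defaults to True,
# leaving one incoming link for the page and one outgoing link to /docs.
print(links)
```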
@@ -0,0 +1,36 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Generic, Iterable, Set, TypeVar

from langchain_core.graph_vectorstores import Link

InputT = TypeVar("InputT")

METADATA_LINKS_KEY = "links"


class LinkExtractor(ABC, Generic[InputT]):
    """Interface for extracting links (incoming, outgoing, bidirectional)."""

    @abstractmethod
    def extract_one(self, input: InputT) -> set[Link]:  # noqa: A002
        """Add edges from each `input` to the corresponding documents.

        Args:
            input: The input content to extract edges from.

        Returns:
            Set of links extracted from the input.
        """

    def extract_many(self, inputs: Iterable[InputT]) -> Iterable[Set[Link]]:
        """Add edges from each `input` to the corresponding documents.

        Args:
            inputs: The input content to extract edges from.

        Returns:
            Iterable over the set of links extracted from the input.
        """
        return map(self.extract_one, inputs)
@@ -0,0 +1,27 @@
from typing import Callable, Iterable, Set, TypeVar

from langchain_core.graph_vectorstores import Link

from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)

InputT = TypeVar("InputT")
UnderlyingInputT = TypeVar("UnderlyingInputT")


class LinkExtractorAdapter(LinkExtractor[InputT]):
    def __init__(
        self,
        underlying: LinkExtractor[UnderlyingInputT],
        transform: Callable[[InputT], UnderlyingInputT],
    ) -> None:
        self._underlying = underlying
        self._transform = transform

    def extract_one(self, input: InputT) -> Set[Link]:  # noqa: A002
        return self._underlying.extract_one(self._transform(input))

    def extract_many(self, inputs: Iterable[InputT]) -> Iterable[Set[Link]]:
        underlying_inputs = map(self._transform, inputs)
        return self._underlying.extract_many(underlying_inputs)
@@ -16,7 +16,7 @@ from langchain_core.callbacks import (
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from langchain_core.pydantic_v1 import Field
from langchain_core.pydantic_v1 import Field, SecretStr
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init

logger = logging.getLogger(__name__)
@@ -49,8 +49,8 @@ class QianfanLLMEndpoint(LLM):

    client: Any

    qianfan_ak: Optional[str] = None
    qianfan_sk: Optional[str] = None
    qianfan_ak: Optional[SecretStr] = None
    qianfan_sk: Optional[SecretStr] = None

    streaming: Optional[bool] = False
    """Whether to stream the results or not."""
@@ -2,12 +2,12 @@ from __future__ import annotations

import importlib.util
import logging
from typing import Any, List, Mapping, Optional
from typing import Any, Iterator, List, Mapping, Optional

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, LLMResult
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import Extra

DEFAULT_MODEL_ID = "gpt2"
@@ -303,3 +303,63 @@ class HuggingFacePipeline(BaseLLM):
        return LLMResult(
            generations=[[Generation(text=text)] for text in text_generations]
        )

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        from threading import Thread

        import torch
        from transformers import (
            StoppingCriteria,
            StoppingCriteriaList,
            TextIteratorStreamer,
        )

        pipeline_kwargs = kwargs.get("pipeline_kwargs", {})
        skip_prompt = kwargs.get("skip_prompt", True)

        if stop is not None:
            stop = self.pipeline.tokenizer.convert_tokens_to_ids(stop)
        stopping_ids_list = stop or []

        class StopOnTokens(StoppingCriteria):
            def __call__(
                self,
                input_ids: torch.LongTensor,
                scores: torch.FloatTensor,
                **kwargs: Any,
            ) -> bool:
                for stop_id in stopping_ids_list:
                    if input_ids[0][-1] == stop_id:
                        return True
                return False

        stopping_criteria = StoppingCriteriaList([StopOnTokens()])

        inputs = self.pipeline.tokenizer(prompt, return_tensors="pt")
        streamer = TextIteratorStreamer(
            self.pipeline.tokenizer,
            timeout=60.0,
            skip_prompt=skip_prompt,
            skip_special_tokens=True,
        )
        generation_kwargs = dict(
            inputs,
            streamer=streamer,
            stopping_criteria=stopping_criteria,
            **pipeline_kwargs,
        )
        t1 = Thread(target=self.pipeline.model.generate, kwargs=generation_kwargs)
        t1.start()

        for char in streamer:
            chunk = GenerationChunk(text=char)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)

            yield chunk
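With `_stream` implemented above, callers can reach it through the public `stream` API instead of wiring up a `TextIteratorStreamer` and thread themselves (as the OpenVINO notebook change earlier in this diff also does). A hedged sketch; the model choice and `pipeline_kwargs` are illustrative assumptions.

```python
# Sketch: streaming from HuggingFacePipeline via the public `stream` method.
from langchain_community.llms import HuggingFacePipeline

llm = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 50},
)
# Each chunk is a piece of generated text yielded by the background thread.
for chunk in llm.stream("Once upon a time, "):
    print(chunk, end="", flush=True)
```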
@@ -234,7 +234,7 @@ class E2BDataAnalysisTool(BaseTool):
        ]
        self.description = self.description + "\n" + self.uploaded_files_description

    def as_tool(self) -> Tool:
    def as_tool(self) -> Tool:  # type: ignore[override]
        return Tool.from_function(
            func=self._run,
            name=self.name,
@@ -65,7 +65,7 @@ class EdenAiTextToSpeechTool(EdenaiTool):
        )
        return v

    @root_validator
    @root_validator(pre=True)
    def check_voice_models_key_is_provider_name(cls, values: dict) -> dict:
        for key in values.get("voice_models", {}).keys():
            if key not in values.get("providers", []):
@@ -38,7 +38,7 @@ class BaseBrowserTool(BaseTool):
    sync_browser: Optional["SyncBrowser"] = None
    async_browser: Optional["AsyncBrowser"] = None

    @root_validator
    @root_validator(pre=True)
    def validate_browser_provided(cls, values: dict) -> dict:
        """Check that the arguments are valid."""
        lazy_import_playwright_browsers()
@@ -35,7 +35,7 @@ class ExtractHyperlinksTool(BaseBrowserTool):
    description: str = "Extract all hyperlinks on the current webpage"
    args_schema: Type[BaseModel] = ExtractHyperlinksToolInput

    @root_validator
    @root_validator(pre=True)
    def check_bs_import(cls, values: dict) -> dict:
        """Check that the arguments are valid."""
        try:
@@ -22,7 +22,7 @@ class ExtractTextTool(BaseBrowserTool):
    description: str = "Extract all the text on the current webpage"
    args_schema: Type[BaseModel] = BaseModel

    @root_validator
    @root_validator(pre=True)
    def check_acheck_bs_importrgs(cls, values: dict) -> dict:
        """Check that the arguments are valid."""
        try:
@@ -21,7 +21,7 @@ class ShellInput(BaseModel):
    )
    """List of shell commands to run."""

    @root_validator
    @root_validator(pre=True)
    def _validate_commands(cls, values: dict) -> dict:
        """Validate commands."""
        # TODO: Add real validators
@@ -75,8 +75,9 @@ from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.pydantic_v1 import Field
from langchain_core.tools import BaseTool
from langchain_core.utils import pre_init

from langchain_community.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT
from langchain_community.utilities.zapier import ZapierNLAWrapper
@@ -105,7 +106,7 @@ class ZapierNLARunAction(BaseTool):
    name: str = ""
    description: str = ""

    @root_validator
    @pre_init
    def set_name_description(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        zapier_description = values["zapier_description"]
        params_schema = values["params_schema"]
@@ -39,7 +39,7 @@ class SteamWebAPIWrapper(BaseModel):
        """Return a list of operations."""
        return self.operations

    @root_validator
    @root_validator(pre=True)
    def validate_environment(cls, values: dict) -> dict:
        """Validate api key and python package has been configured."""
@@ -114,7 +114,7 @@ class YouSearchAPIWrapper(BaseModel):

        return values

    @root_validator
    @root_validator(pre=False, skip_on_failure=True)
    def warn_if_set_fields_have_no_effect(cls, values: Dict) -> Dict:
        if values["endpoint_type"] != "news":
            news_api_fields = ("search_lang", "ui_lang", "spellcheck")
@@ -139,7 +139,7 @@ class YouSearchAPIWrapper(BaseModel):
        )
        return values

    @root_validator
    @root_validator(pre=False, skip_on_failure=True)
    def warn_if_deprecated_endpoints_are_used(cls, values: Dict) -> Dict:
        if values["endpoint_type"] == "snippets":
            warnings.warn(
@@ -909,6 +909,11 @@ class Neo4jVector(VectorStore):
        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            params (Dict[str, Any]): The search params for the index type.
                Defaults to empty dict.
            filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to
                filter on metadata.
                Defaults to None.

        Returns:
            List of Documents most similar to the query.
@@ -936,6 +941,11 @@ class Neo4jVector(VectorStore):
        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            params (Dict[str, Any]): The search params for the index type.
                Defaults to empty dict.
            filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to
                filter on metadata.
                Defaults to None.

        Returns:
            List of Documents most similar to the query and score for each
@@ -972,6 +982,11 @@ class Neo4jVector(VectorStore):
        Args:
            embedding (List[float]): The embedding vector to compare against.
            k (int, optional): The number of top similar documents to retrieve.
            filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to
                filter on metadata.
                Defaults to None.
            params (Dict[str, Any]): The search params for the index type.
                Defaults to empty dict.

        Returns:
            List[Tuple[Document, float]]: A list of tuples, each containing
@@ -1077,6 +1092,7 @@ class Neo4jVector(VectorStore):
        embedding: List[float],
        k: int = 4,
        filter: Optional[Dict[str, Any]] = None,
        params: Dict[str, Any] = {},
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.
@@ -1084,12 +1100,17 @@ class Neo4jVector(VectorStore):
        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to
                filter on metadata.
                Defaults to None.
            params (Dict[str, Any]): The search params for the index type.
                Defaults to empty dict.

        Returns:
            List of Documents most similar to the query vector.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, filter=filter, **kwargs
            embedding=embedding, k=k, filter=filter, params=params, **kwargs
        )
        return [doc for doc, _ in docs_and_scores]
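The hunks above thread a `params` dict through the `Neo4jVector` search methods. A hedged sketch of calling it is below; `store` is assumed to be an existing `Neo4jVector` instance, and the `"ef"` key is a hypothetical index-specific search parameter, not a documented name.

```python
# Sketch: passing index-type search params alongside a metadata filter.
docs = store.similarity_search(
    "What is LangChain?",
    k=4,
    params={"ef": 100},        # hypothetical index-specific search parameter
    filter={"source": "docs"}, # metadata filter, per the docstrings above
)
```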
1167 libs/community/poetry.lock (generated; file diff suppressed because it is too large)
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "langchain-community"
version = "0.2.6"
version = "0.2.7"
description = "Community contributed LangChain integrations."
authors = []
license = "MIT"
@@ -26,8 +26,8 @@ ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogy

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = "^0.2.10"
langchain = "^0.2.6"
langchain-core = "^0.2.12"
langchain = "^0.2.7"
SQLAlchemy = ">=1.4,<3"
requests = "^2"
PyYAML = ">=5.3"
@@ -14,7 +14,7 @@ fi
repository_path="$1"

# Search for lines matching the pattern within the specified repository
result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic')
result=$(git -C "$repository_path" grep -En '^import pydantic|^from pydantic')

# Check if any matching lines were found
if [ -n "$result" ]; then
@@ -25,3 +25,20 @@ if [ -n "$result" ]; then
echo "with 'from langchain_core.pydantic_v1 import BaseModel'"
exit 1
fi

# Forbid vanilla usage of @root_validator
# This prevents the code from using either @root_validator or @root_validator()
# Search for lines matching the pattern within the specified repository
result=$(git -C "$repository_path" grep -En '(@root_validator\s*$)|(@root_validator\(\))' -- '*.py')

# Check if any matching lines were found
if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo
echo "$result"
echo
echo "Please replace @root_validator or @root_validator() with either:"
echo
echo "@root_validator(pre=True) or @root_validator(pre=False, skip_on_failure=True)"
exit 1
fi
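For contributors tripped by the new check, the two accepted forms are exactly the ones named in the error message. A hedged illustration, using langchain_core's pydantic_v1 shim; the model and validators are invented for the example:

from langchain_core.pydantic_v1 import BaseModel, root_validator

class ExampleConfig(BaseModel):
    host: str
    port: int = 0

    @root_validator(pre=True)  # accepted: runs before field validation
    def set_default_port(cls, values: dict) -> dict:
        values.setdefault("port", 7687)
        return values

    @root_validator(pre=False, skip_on_failure=True)  # accepted: runs after
    def check_host(cls, values: dict) -> dict:
        if not values["host"]:
            raise ValueError("host must be non-empty")
        return values

    # A bare @root_validator or @root_validator() here would fail the script.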
@@ -1,24 +1,46 @@
#!/bin/bash

# This script searches for invalid imports in tracked files within a Git repository.
#
# Usage: ./scripts/lint_imports.sh /path/to/repository
set -eu

# Initialize a variable to keep track of errors
errors=0
# Check if a path argument is provided
if [ $# -ne 1 ]; then
echo "Usage: $0 /path/to/repository"
exit 1
fi

# make sure not importing from langchain or langchain_experimental
git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1))
repository_path="$1"

# make sure not importing from langchain_experimental
result=$(git -C "$repository_path" grep -En '^import langchain_experimental|^from langchain_experimental' -- '*.py' || true)

# Check if any matching lines were found
if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo "$result"
echo "langchain_community should not import from langchain_experimental."
exit 1
fi

# make sure no one is importing from the built-in xml library
# instead defusedxml should be used to avoid getting CVEs.
# Whether the standary library actually poses a risk to users
# is very nuanced and dependns on user's environment.
# Whether the standard library actually poses a risk to users
# is very nuanced and depends on the user's environment.
# https://docs.python.org/3/library/xml.etree.elementtree.html
git --no-pager grep '^from xml\.' . | grep -vE "# OK: user-must-opt-in" && errors=$((errors+1))
git --no-pager grep '^import xml\.' . | grep -vE "# OK: user-must-opt-in" && errors=$((errors+1))

# Decide on an exit status based on the errors
if [ "$errors" -gt 0 ]; then
exit 1
else
exit 0
result=$(git -C "$repository_path" grep -En '^from xml.|^import xml$|^import xml.' | grep -vE "# OK: user-must-opt-in" || true)

if [ -n "$result" ]; then
echo "ERROR: The following lines need to be updated:"
echo "$result"
echo "Triggering an error due to usage of the built-in xml library. "
echo "Please see https://docs.python.org/3/library/xml.html#xml-vulnerabilities."
echo "If this happens, there's likely code that's relying on the standard library "
echo "to parse xml somewhere in the code path. "
echo "Please update the code to force the user to explicitly opt-in to using the standard library or running the code. "
echo "It should be **obvious** without reading the documentation that they are being forced to use the standard library. "
echo "After this is done feel free to add a comment to the line with '# OK: user-must-opt-in', after the import. "
echo "Lacking a clear opt-in mechanism is likely a security risk, and will result in rejection of the PR."
exit 1
fi
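A hedged sketch of what the required opt-in can look like in code; the function and flag names are illustrative, not taken from the repository:

import defusedxml.ElementTree  # hardened parser, the default choice

def parse_xml(text: str, *, allow_stdlib_xml: bool = False):
    """Parse XML, forcing callers to opt in before stdlib xml is used."""
    if allow_stdlib_xml:
        # Explicit opt-in, flagged with the comment convention from the rule.
        import xml.etree.ElementTree as ET  # OK: user-must-opt-in
        return ET.fromstring(text)
    return defusedxml.ElementTree.fromstring(text)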
@@ -0,0 +1,117 @@
import pytest
from langchain_core.graph_vectorstores import Link

from langchain_community.graph_vectorstores.extractors import (
HtmlInput,
HtmlLinkExtractor,
)

PAGE_1 = """
<html>
<body>
Hello.
<a href="relative">Relative</a>
<a href="/relative-base">Relative base.</a>
<a href="http://cnn.com">Absolute</a>
<a href="//same.foo">Test</a>
</body>
</html>
"""

PAGE_2 = """
<html>
<body>
Hello.
<a href="/bar/#fragment">Relative</a>
</html>
"""


@pytest.mark.requires("bs4")
def test_one_from_str() -> None:
extractor = HtmlLinkExtractor()

results = extractor.extract_one(HtmlInput(PAGE_1, base_url="https://foo.com/bar/"))
assert results == {
Link.incoming(kind="hyperlink", tag="https://foo.com/bar/"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/bar/relative"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/relative-base"),
Link.outgoing(kind="hyperlink", tag="http://cnn.com"),
Link.outgoing(kind="hyperlink", tag="https://same.foo"),
}

results = extractor.extract_one(HtmlInput(PAGE_1, base_url="http://foo.com/bar/"))
assert results == {
Link.incoming(kind="hyperlink", tag="http://foo.com/bar/"),
Link.outgoing(kind="hyperlink", tag="http://foo.com/bar/relative"),
Link.outgoing(kind="hyperlink", tag="http://foo.com/relative-base"),
Link.outgoing(kind="hyperlink", tag="http://cnn.com"),
Link.outgoing(kind="hyperlink", tag="http://same.foo"),
}


@pytest.mark.requires("bs4")
def test_one_from_beautiful_soup() -> None:
from bs4 import BeautifulSoup

extractor = HtmlLinkExtractor()
soup = BeautifulSoup(PAGE_1, "html.parser")
results = extractor.extract_one(HtmlInput(soup, base_url="https://foo.com/bar/"))
assert results == {
Link.incoming(kind="hyperlink", tag="https://foo.com/bar/"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/bar/relative"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/relative-base"),
Link.outgoing(kind="hyperlink", tag="http://cnn.com"),
Link.outgoing(kind="hyperlink", tag="https://same.foo"),
}


@pytest.mark.requires("bs4")
def test_drop_fragments() -> None:
extractor = HtmlLinkExtractor(drop_fragments=True)
results = extractor.extract_one(
HtmlInput(PAGE_2, base_url="https://foo.com/baz/#fragment")
)

assert results == {
Link.incoming(kind="hyperlink", tag="https://foo.com/baz/"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/bar/"),
}


@pytest.mark.requires("bs4")
def test_include_fragments() -> None:
extractor = HtmlLinkExtractor(drop_fragments=False)
results = extractor.extract_one(
HtmlInput(PAGE_2, base_url="https://foo.com/baz/#fragment")
)

assert results == {
Link.incoming(kind="hyperlink", tag="https://foo.com/baz/#fragment"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/bar/#fragment"),
}


@pytest.mark.requires("bs4")
def test_batch_from_str() -> None:
extractor = HtmlLinkExtractor()
results = list(
extractor.extract_many(
[
HtmlInput(PAGE_1, base_url="https://foo.com/bar/"),
HtmlInput(PAGE_2, base_url="https://foo.com/baz/"),
]
)
)

assert results[0] == {
Link.incoming(kind="hyperlink", tag="https://foo.com/bar/"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/bar/relative"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/relative-base"),
Link.outgoing(kind="hyperlink", tag="http://cnn.com"),
Link.outgoing(kind="hyperlink", tag="https://same.foo"),
}
assert results[1] == {
Link.incoming(kind="hyperlink", tag="https://foo.com/baz/"),
Link.outgoing(kind="hyperlink", tag="https://foo.com/bar/"),
}
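The expected tags in these tests follow standard relative-URL resolution, which is why the scheme-relative `//same.foo` link inherits whichever scheme the `base_url` carries. The behavior can be confirmed with the standard library (not part of the test file):

from urllib.parse import urljoin

assert urljoin("https://foo.com/bar/", "relative") == "https://foo.com/bar/relative"
assert urljoin("https://foo.com/bar/", "/relative-base") == "https://foo.com/relative-base"
# Scheme-relative href: keeps the base scheme, replaces the host.
assert urljoin("https://foo.com/bar/", "//same.foo") == "https://same.foo"
assert urljoin("http://foo.com/bar/", "//same.foo") == "http://same.foo"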
@@ -13,7 +13,7 @@ tests:
poetry run pytest $(TEST_FILE)

test_watch:
poetry run ptw --snapshot-update --now . -- -vv -x tests/unit_tests
poetry run ptw --snapshot-update --now . -- -vv tests/unit_tests

test_profile:
poetry run pytest -vv tests/unit_tests/ --profile-svg
@@ -38,11 +38,15 @@ from langchain_core.callbacks.manager import (
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

__all__ = [
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
@@ -370,6 +370,31 @@ class RunManagerMixin:
kwargs (Any): Additional keyword arguments.
"""

def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Override to define a handler for a custom event.

Args:
name: The name of the custom event.
data: The data for the custom event. Format will match
the format specified by the user.
run_id: The ID of the run.
tags: The tags associated with the custom event
(includes inherited tags).
metadata: The metadata associated with the custom event
(includes inherited metadata).

.. versionadded:: 0.2.14
"""


class BaseCallbackHandler(
LLMManagerMixin,
@@ -417,6 +442,11 @@ class BaseCallbackHandler(
"""Whether to ignore chat model callbacks."""
return False

@property
def ignore_custom_event(self) -> bool:
"""Ignore custom event."""
return False


class AsyncCallbackHandler(BaseCallbackHandler):
"""Async callback handler for LangChain."""
@@ -799,6 +829,31 @@ class AsyncCallbackHandler(BaseCallbackHandler):
kwargs (Any): Additional keyword arguments.
"""

async def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Override to define a handler for a custom event.

Args:
name: The name of the custom event.
data: The data for the custom event. Format will match
the format specified by the user.
run_id: The ID of the run.
tags: The tags associated with the custom event
(includes inherited tags).
metadata: The metadata associated with the custom event
(includes inherited metadata).

.. versionadded:: 0.2.14
"""


T = TypeVar("T", bound="BaseCallbackManager")
@@ -48,6 +48,7 @@ if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.documents import Document
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult
from langchain_core.runnables.config import RunnableConfig

logger = logging.getLogger(__name__)

@@ -1494,6 +1495,46 @@ class CallbackManager(BaseCallbackManager):
inheritable_metadata=self.inheritable_metadata,
)

def on_custom_event(
self,
name: str,
data: Any,
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
"""Dispatch an adhoc event to the handlers.

This event should NOT be used in any internal LangChain code. The event
is meant specifically for users of the library to dispatch custom
events that are tailored to their application.

Args:
name: The name of the adhoc event.
data: The data for the adhoc event.
run_id: The ID of the run. Defaults to None.

.. versionadded:: 0.2.14
"""
if kwargs:
raise ValueError(
"The dispatcher API does not accept additional keyword arguments. "
"Please do not pass any additional keyword arguments, instead "
"include them in the data field."
)
if run_id is None:
run_id = uuid.uuid4()

handle_event(
self.handlers,
"on_custom_event",
"ignore_custom_event",
name,
data,
run_id=run_id,
tags=self.tags,
metadata=self.metadata,
)

@classmethod
def configure(
cls,
@@ -1833,6 +1874,46 @@ class AsyncCallbackManager(BaseCallbackManager):
inheritable_metadata=self.inheritable_metadata,
)

async def on_custom_event(
self,
name: str,
data: Any,
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
"""Dispatch an adhoc event to the handlers (async version).

This event should NOT be used in any internal LangChain code. The event
is meant specifically for users of the library to dispatch custom
events that are tailored to their application.

Args:
name: The name of the adhoc event.
data: The data for the adhoc event.
run_id: The ID of the run. Defaults to None.

.. versionadded:: 0.2.14
"""
if run_id is None:
run_id = uuid.uuid4()

if kwargs:
raise ValueError(
"The dispatcher API does not accept additional keyword arguments. "
"Please do not pass any additional keyword arguments, instead "
"include them in the data field."
)
await ahandle_event(
self.handlers,
"on_custom_event",
"ignore_custom_event",
name,
data,
run_id=run_id,
tags=self.tags,
metadata=self.metadata,
)

async def on_retriever_start(
self,
serialized: Dict[str, Any],
@@ -2137,7 +2218,7 @@ def _configure(
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
" unset the LANGCHAIN_TRACING_V2 environment variables.\n"
f"{repr(e)}",
)
if run_tree is not None:
@@ -2169,3 +2250,189 @@ def _configure(
):
callback_manager.add_handler(var_handler, inheritable)
return callback_manager
async def adispatch_custom_event(
name: str, data: Any, *, config: Optional[RunnableConfig] = None
) -> None:
"""Dispatch an adhoc event to the handlers.

Args:
name: The name of the adhoc event.
data: The data for the adhoc event. Free form data. Ideally should be
JSON serializable to avoid serialization issues downstream, but
this is not enforced.
config: Optional config object. Mirrors the async API but not strictly needed.

Example:

.. code-block:: python

from langchain_core.callbacks import (
AsyncCallbackHandler,
adispatch_custom_event
)
from langchain_core.runnables import RunnableLambda

class CustomCallbackManager(AsyncCallbackHandler):
async def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
print(f"Received custom event: {name} with data: {data}")

callback = CustomCallbackManager()

async def foo(inputs):
await adispatch_custom_event("my_event", {"bar": "buzz"})
return inputs

foo_ = RunnableLambda(foo)
await foo_.ainvoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})

Example: Use with astream events

.. code-block:: python

from langchain_core.callbacks import (
AsyncCallbackHandler,
adispatch_custom_event
)
from langchain_core.runnables import RunnableLambda

class CustomCallbackManager(AsyncCallbackHandler):
async def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
print(f"Received custom event: {name} with data: {data}")

callback = CustomCallbackManager()

async def foo(inputs):
await adispatch_custom_event("event_type_1", {"bar": "buzz"})
await adispatch_custom_event("event_type_2", 5)
return inputs

foo_ = RunnableLambda(foo)

async for event in foo_.astream_events(
{"a": "1"},
version="v2",
config={"callbacks": [CustomCallbackManager()]}
):
print(event)

.. warning:: If using python <= 3.10 and async, you MUST
specify the `config` parameter or the function will raise an error.
This is due to a limitation in asyncio for python <= 3.10 that prevents
LangChain from automatically propagating the config object on the user's
behalf.

.. versionadded:: 0.2.14
"""
from langchain_core.runnables.config import (
ensure_config,
get_async_callback_manager_for_config,
)

config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# We want to get the callback manager for the parent run.
# This is a work-around for now to be able to dispatch adhoc events from
# within a tool or a lambda and have the metadata events associated
# with the parent run rather than have a new run id generated for each.
if callback_manager.parent_run_id is None:
raise RuntimeError(
"Unable to dispatch an adhoc event without a parent run id. "
"This function can only be called from within an existing run (e.g., "
"inside a tool or a RunnableLambda or a RunnableGenerator). "
"If you are doing that and still seeing this error, try explicitly "
"passing the config parameter to this function."
)

await callback_manager.on_custom_event(
name,
data,
run_id=callback_manager.parent_run_id,
)
def dispatch_custom_event(
name: str, data: Any, *, config: Optional[RunnableConfig] = None
) -> None:
"""Dispatch an adhoc event.

Args:
name: The name of the adhoc event.
data: The data for the adhoc event. Free form data. Ideally should be
JSON serializable to avoid serialization issues downstream, but
this is not enforced.
config: Optional config object. Mirrors the async API but not strictly needed.

Example:

.. code-block:: python

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.callbacks import dispatch_custom_event
from langchain_core.runnables import RunnableLambda

class CustomCallbackManager(BaseCallbackHandler):
def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
print(f"Received custom event: {name} with data: {data}")

def foo(inputs):
dispatch_custom_event("my_event", {"bar": "buzz"})
return inputs

foo_ = RunnableLambda(foo)
foo_.invoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})

.. versionadded:: 0.2.14
"""
from langchain_core.runnables.config import (
ensure_config,
get_callback_manager_for_config,
)

config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# We want to get the callback manager for the parent run.
# This is a work-around for now to be able to dispatch adhoc events from
# within a tool or a lambda and have the metadata events associated
# with the parent run rather than have a new run id generated for each.
if callback_manager.parent_run_id is None:
raise RuntimeError(
"Unable to dispatch an adhoc event without a parent run id. "
"This function can only be called from within an existing run (e.g., "
"inside a tool or a RunnableLambda or a RunnableGenerator). "
"If you are doing that and still seeing this error, try explicitly "
"passing the config parameter to this function."
)
callback_manager.on_custom_event(
name,
data,
run_id=callback_manager.parent_run_id,
)
@@ -94,13 +94,11 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
ChatResult: Chat result.
"""

generation: Optional[ChatGenerationChunk] = None
for chunk in stream:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
generation = next(stream, None)
if generation:
generation += list(stream)
if generation is None:
raise ValueError("No generations found in stream.")
return ChatResult(
generations=[
ChatGeneration(
@@ -123,21 +121,8 @@ async def agenerate_from_stream(
ChatResult: Chat result.
"""

generation: Optional[ChatGenerationChunk] = None
async for chunk in stream:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return ChatResult(
generations=[
ChatGeneration(
message=message_chunk_to_message(generation.message),
generation_info=generation.generation_info,
)
]
)
chunks = [chunk async for chunk in stream]
return await run_in_executor(None, generate_from_stream, iter(chunks))


class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
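The rewritten `generate_from_stream` leans on the list-accepting `__add__` overloads introduced further down in this changeset: the first chunk is pulled with `next`, and every remaining chunk is merged in a single call. A hedged sketch with stand-in chunks:

from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

# Stand-in stream; in practice these chunks come from a chat model.
stream = (
    ChatGenerationChunk(message=AIMessageChunk(content=part))
    for part in ["Hel", "lo", "!"]
)

generation = next(stream, None)
if generation:
    generation += list(stream)  # one merge instead of chunk-by-chunk addition
assert generation is not None
assert generation.message.content == "Hello!"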
@@ -267,64 +267,69 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, AIMessageChunk):
if self.example != other.example:
raise ValueError(
"Cannot concatenate AIMessageChunks with different example values."
)

content = merge_content(self.content, other.content)
additional_kwargs = merge_dicts(
self.additional_kwargs, other.additional_kwargs
)
response_metadata = merge_dicts(
self.response_metadata, other.response_metadata
)

# Merge tool call chunks
if self.tool_call_chunks or other.tool_call_chunks:
raw_tool_calls = merge_lists(
self.tool_call_chunks,
other.tool_call_chunks,
)
if raw_tool_calls:
tool_call_chunks = [
ToolCallChunk(
name=rtc.get("name"),
args=rtc.get("args"),
index=rtc.get("index"),
id=rtc.get("id"),
)
for rtc in raw_tool_calls
]
else:
tool_call_chunks = []
else:
tool_call_chunks = []

# Token usage
if self.usage_metadata or other.usage_metadata:
left: UsageMetadata = self.usage_metadata or UsageMetadata(
input_tokens=0, output_tokens=0, total_tokens=0
)
right: UsageMetadata = other.usage_metadata or UsageMetadata(
input_tokens=0, output_tokens=0, total_tokens=0
)
usage_metadata: Optional[UsageMetadata] = {
"input_tokens": left["input_tokens"] + right["input_tokens"],
"output_tokens": left["output_tokens"] + right["output_tokens"],
"total_tokens": left["total_tokens"] + right["total_tokens"],
}
else:
usage_metadata = None

return self.__class__(
example=self.example,
content=content,
additional_kwargs=additional_kwargs,
tool_call_chunks=tool_call_chunks,
response_metadata=response_metadata,
usage_metadata=usage_metadata,
id=self.id,
)

return add_ai_message_chunks(self, other)
elif isinstance(other, (list, tuple)) and all(
isinstance(o, AIMessageChunk) for o in other
):
return add_ai_message_chunks(self, *other)
return super().__add__(other)


def add_ai_message_chunks(
left: AIMessageChunk, *others: AIMessageChunk
) -> AIMessageChunk:
"""Add multiple AIMessageChunks together."""
if any(left.example != o.example for o in others):
raise ValueError(
"Cannot concatenate AIMessageChunks with different example values."
)

content = merge_content(left.content, *(o.content for o in others))
additional_kwargs = merge_dicts(
left.additional_kwargs, *(o.additional_kwargs for o in others)
)
response_metadata = merge_dicts(
left.response_metadata, *(o.response_metadata for o in others)
)

# Merge tool call chunks
if raw_tool_calls := merge_lists(
left.tool_call_chunks, *(o.tool_call_chunks for o in others)
):
tool_call_chunks = [
ToolCallChunk(
name=rtc.get("name"),
args=rtc.get("args"),
index=rtc.get("index"),
id=rtc.get("id"),
)
for rtc in raw_tool_calls
]
else:
tool_call_chunks = []

# Token usage
if left.usage_metadata or any(o.usage_metadata is not None for o in others):
usage_metadata_: UsageMetadata = left.usage_metadata or UsageMetadata(
input_tokens=0, output_tokens=0, total_tokens=0
)
for other in others:
if other.usage_metadata is not None:
usage_metadata_["input_tokens"] += other.usage_metadata["input_tokens"]
usage_metadata_["output_tokens"] += other.usage_metadata[
"output_tokens"
]
usage_metadata_["total_tokens"] += other.usage_metadata["total_tokens"]
usage_metadata: Optional[UsageMetadata] = usage_metadata_
else:
usage_metadata = None

return left.__class__(
example=left.example,
content=content,
additional_kwargs=additional_kwargs,
tool_call_chunks=tool_call_chunks,
response_metadata=response_metadata,
usage_metadata=usage_metadata,
id=left.id,
)
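With `add_ai_message_chunks`, N chunks collapse in one pass: contents, kwargs, metadata, tool-call chunks, and token usage are each merged once rather than N-1 times. A small sketch of the list form of `+`:

from langchain_core.messages import AIMessageChunk

chunks = [AIMessageChunk(content=part) for part in ["foo", " ", "bar"]]

merged = chunks[0] + chunks[1:]  # list operand triggers add_ai_message_chunks
assert merged.content == "foo bar"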
@@ -111,7 +111,7 @@ class BaseMessage(Serializable):

def merge_content(
first_content: Union[str, List[Union[str, Dict]]],
second_content: Union[str, List[Union[str, Dict]]],
*contents: Union[str, List[Union[str, Dict]]],
) -> Union[str, List[Union[str, Dict]]]:
"""Merge two message contents.

@@ -122,31 +122,32 @@ def merge_content(
Returns:
The merged content.
"""
# If first chunk is a string
if isinstance(first_content, str):
# If the second chunk is also a string, then merge them naively
if isinstance(second_content, str):
return first_content + second_content
# If the second chunk is a list, add the first chunk to the start of the list
merged = first_content
for content in contents:
# If current is a string
if isinstance(merged, str):
# If the next chunk is also a string, then merge them naively
if isinstance(content, str):
merged = cast(str, merged) + content
# If the next chunk is a list, add the current to the start of the list
else:
merged = [merged] + content # type: ignore
elif isinstance(content, list):
# If both are lists
merged = merge_lists(cast(List, merged), content) # type: ignore
# If the first content is a list, and the second content is a string
else:
return_list: List[Union[str, Dict]] = [first_content]
return return_list + second_content
elif isinstance(second_content, List):
# If both are lists
merged_list = merge_lists(first_content, second_content)
return cast(list, merged_list)
# If the first content is a list, and the second content is a string
else:
# If the last element of the first content is a string
# Add the second content to the last element
if isinstance(first_content[-1], str):
return first_content[:-1] + [first_content[-1] + second_content]
# If second content is an empty string, treat as a no-op
elif second_content == "":
return first_content
else:
# Otherwise, add the second content as a new element of the list
return first_content + [second_content]
# If the last element of the first content is a string
# Add the second content to the last element
if isinstance(merged[-1], str):
merged[-1] += content
# If second content is an empty string, treat as a no-op
elif content == "":
pass
else:
# Otherwise, add the second content as a new element of the list
merged.append(content)
return merged


class BaseMessageChunk(BaseMessage):
@@ -195,6 +196,22 @@ class BaseMessageChunk(BaseMessage):
self.response_metadata, other.response_metadata
),
)
elif isinstance(other, list) and all(
isinstance(o, BaseMessageChunk) for o in other
):
content = merge_content(self.content, *(o.content for o in other))
additional_kwargs = merge_dicts(
self.additional_kwargs, *(o.additional_kwargs for o in other)
)
response_metadata = merge_dicts(
self.response_metadata, *(o.response_metadata for o in other)
)
return self.__class__( # type: ignore[call-arg]
id=self.id,
content=content,
additional_kwargs=additional_kwargs,
response_metadata=response_metadata,
)
else:
raise TypeError(
'unsupported operand type(s) for +: "'
@@ -3,12 +3,8 @@ from typing import Any, Dict, List, Literal, Optional, Tuple, Union

from typing_extensions import TypedDict

from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
from langchain_core.utils._merge import merge_dicts, merge_obj


class ToolMessage(BaseMessage):
@@ -17,7 +13,7 @@ class ToolMessage(BaseMessage):
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.

Example: A TooMessage representing a result of 42 from a tool call with id
Example: A ToolMessage representing a result of 42 from a tool call with id

.. code-block:: python

@@ -25,10 +21,29 @@ class ToolMessage(BaseMessage):

ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')

Example: A ToolMessage where only part of the tool output is sent to the model
and the full output is passed in to raw_output.

.. code-block:: python

from langchain_core.messages import ToolMessage

tool_output = {
"stdout": "From the graph we can see that the correlation between x and y is ...",
"stderr": None,
"artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
}

ToolMessage(
content=tool_output["stdout"],
raw_output=tool_output,
tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
)

The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
""" # noqa: E501

tool_call_id: str
"""Tool call that this message is responding to."""
@@ -39,6 +54,14 @@ class ToolMessage(BaseMessage):
type: Literal["tool"] = "tool"
"""The type of the message (used for serialization). Defaults to "tool"."""

raw_output: Any = None
"""The raw output of the tool.

**Not part of the payload sent to the model.** Should only be specified if it is
different from the message content, i.e. if only a subset of the full tool output
is being passed as message content.
"""

@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object.
@@ -83,6 +106,7 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
return self.__class__(
tool_call_id=self.tool_call_id,
content=merge_content(self.content, other.content),
raw_output=merge_obj(self.raw_output, other.raw_output),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
@@ -221,7 +221,8 @@ def _create_message_from_message_type(
elif message_type == "function":
message = FunctionMessage(content=content, **kwargs)
elif message_type == "tool":
message = ToolMessage(content=content, **kwargs)
raw_output = kwargs.get("additional_kwargs", {}).pop("raw_output", None)
message = ToolMessage(content=content, raw_output=raw_output, **kwargs)
elif message_type == "remove":
message = RemoveMessage(**kwargs)
else:
@@ -17,6 +17,7 @@ from langchain_core.outputs import (
Generation,
GenerationChunk,
)
from langchain_core.runnables.config import run_in_executor

if TYPE_CHECKING:
from langchain_core.runnables import RunnableConfig
@@ -37,9 +38,13 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
) -> AsyncIterator[T]:
async for chunk in input:
if isinstance(chunk, BaseMessage):
yield self.parse_result([ChatGeneration(message=chunk)])
yield await run_in_executor(
None, self.parse_result, [ChatGeneration(message=chunk)]
)
else:
yield self.parse_result([Generation(text=chunk)])
yield await run_in_executor(
None, self.parse_result, [Generation(text=chunk)]
)

def transform(
self,
@@ -153,7 +158,7 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
parsed = await self.aparse_result([acc_gen], partial=True)
if parsed is not None and parsed != prev_parsed:
if self.diff:
yield self._diff(prev_parsed, parsed)
yield await run_in_executor(None, self._diff, prev_parsed, parsed)
else:
yield parsed
prev_parsed = parsed
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Any, Dict, List, Literal
from typing import Any, Dict, List, Literal, Union

from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
@@ -88,7 +88,9 @@ class ChatGenerationChunk(ChatGeneration):
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "output"]

def __add__(self, other: ChatGenerationChunk) -> ChatGenerationChunk:
def __add__(
self, other: Union[ChatGenerationChunk, List[ChatGenerationChunk]]
) -> ChatGenerationChunk:
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
@@ -98,6 +100,17 @@ class ChatGenerationChunk(ChatGeneration):
message=self.message + other.message,
generation_info=generation_info or None,
)
elif isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
else:
raise TypeError(
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
@@ -92,6 +92,7 @@ if TYPE_CHECKING:
from langchain_core.runnables.fallbacks import (
RunnableWithFallbacks as RunnableWithFallbacksT,
)
from langchain_core.tools import BaseTool
from langchain_core.tracers.log_stream import (
RunLog,
RunLogPatch,
@@ -397,7 +398,9 @@ class Runnable(Generic[Input, Output], ABC):
input_node = graph.add_node(self.get_input_schema(config))
except TypeError:
input_node = graph.add_node(create_model(self.get_name("Input")))
runnable_node = graph.add_node(self)
runnable_node = graph.add_node(
self, metadata=config.get("metadata") if config else None
)
try:
output_node = graph.add_node(self.get_output_schema(config))
except TypeError:
@@ -2006,6 +2009,78 @@ class Runnable(Generic[Input, Output], ABC):
if hasattr(iterator_, "aclose"):
await iterator_.aclose()

@beta_decorator.beta(message="This API is in beta and may change in the future.")
def as_tool(
self,
*,
name: Optional[str] = None,
description: Optional[str] = None,
arg_types: Optional[Dict[str, Type]] = None,
) -> BaseTool:
"""Create a BaseTool from a Runnable.

``as_tool`` will instantiate a BaseTool with a name, description, and
``args_schema`` from a runnable. Where possible, schemas are inferred
from ``runnable.get_input_schema``. Alternatively (e.g., if the
runnable takes a dict as input and the specific dict keys are not typed),
pass ``arg_types`` to specify the required arguments.

Typed dict input:

.. code-block:: python

from typing import List
from typing_extensions import TypedDict
from langchain_core.runnables import RunnableLambda

class Args(TypedDict):
a: int
b: List[int]

def f(x: Args) -> str:
return str(x["a"] * max(x["b"]))

runnable = RunnableLambda(f)
as_tool = runnable.as_tool()
as_tool.invoke({"a": 3, "b": [1, 2]})

``dict`` input, specifying schema:

.. code-block:: python

from typing import Any, Dict, List
from langchain_core.runnables import RunnableLambda

def f(x: Dict[str, Any]) -> str:
return str(x["a"] * max(x["b"]))

runnable = RunnableLambda(f)
as_tool = runnable.as_tool(arg_types={"a": int, "b": List[int]})
as_tool.invoke({"a": 3, "b": [1, 2]})

String input:

.. code-block:: python

from langchain_core.runnables import RunnableLambda

def f(x: str) -> str:
return x + "a"

def g(x: str) -> str:
return x + "z"

runnable = RunnableLambda(f) | g
as_tool = runnable.as_tool()
as_tool.invoke("b")
"""
# Avoid circular import
from langchain_core.tools import convert_runnable_to_tool

return convert_runnable_to_tool(
self, name=name, description=description, arg_types=arg_types
)


class RunnableSerializable(Serializable, Runnable[Input, Output]):
"""Runnable that can be serialized to JSON."""
@@ -4556,7 +4631,7 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]):
return self.bound.config_specs

def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph:
return self.bound.get_graph(config)
return self.bound.get_graph(self._merge_configs(config))

@classmethod
def is_lc_serializable(cls) -> bool:
@@ -1,6 +1,7 @@
from __future__ import annotations

import inspect
from collections import Counter
from dataclasses import dataclass, field
from enum import Enum
from typing import (
@@ -63,12 +64,32 @@ class Edge(NamedTuple):
data: Optional[Stringifiable] = None
conditional: bool = False

def copy(
self, *, source: Optional[str] = None, target: Optional[str] = None
) -> Edge:
return Edge(
source=source or self.source,
target=target or self.target,
data=self.data,
conditional=self.conditional,
)


class Node(NamedTuple):
"""Node in a graph."""

id: str
name: str
data: Union[Type[BaseModel], RunnableType]
metadata: Optional[Dict[str, Any]]

def copy(self, *, id: Optional[str] = None, name: Optional[str] = None) -> Node:
return Node(
id=id or self.id,
name=name or self.name,
data=self.data,
metadata=self.metadata,
)


class Branch(NamedTuple):
@@ -111,35 +132,25 @@ class MermaidDrawMethod(Enum):
API = "api" # Uses Mermaid.INK API to render the graph


def node_data_str(node: Node) -> str:
def node_data_str(id: str, data: Union[Type[BaseModel], RunnableType]) -> str:
"""Convert the data of a node to a string.

Args:
node: The node to convert.
id: The node id.
data: The node data.

Returns:
A string representation of the data.
"""
from langchain_core.runnables.base import Runnable

if not is_uuid(node.id):
return node.id
elif isinstance(node.data, Runnable):
try:
data = str(node.data)
if (
data.startswith("<")
or data[0] != data[0].upper()
or len(data.splitlines()) > 1
):
data = node.data.__class__.__name__
elif len(data) > 42:
data = data[:42] + "..."
except Exception:
data = node.data.__class__.__name__
if not is_uuid(id):
return id
elif isinstance(data, Runnable):
data_str = data.get_name()
else:
data = node.data.__name__
return data if not data.startswith("Runnable") else data[8:]
data_str = data.__name__
return data_str if not data_str.startswith("Runnable") else data_str[8:]


def node_data_json(
@@ -159,23 +170,23 @@ def node_data_json(
from langchain_core.runnables.base import Runnable, RunnableSerializable

if isinstance(node.data, RunnableSerializable):
return {
json: Dict[str, Any] = {
"type": "runnable",
"data": {
"id": node.data.lc_id(),
"name": node.data.get_name(),
"name": node_data_str(node.id, node.data),
},
}
elif isinstance(node.data, Runnable):
return {
json = {
"type": "runnable",
"data": {
"id": to_json_not_implemented(node.data)["id"],
"name": node.data.get_name(),
"name": node_data_str(node.id, node.data),
},
}
elif inspect.isclass(node.data) and issubclass(node.data, BaseModel):
return (
json = (
{
"type": "schema",
"data": node.data.schema(),
@@ -183,14 +194,17 @@ def node_data_json(
if with_schemas
else {
"type": "schema",
"data": node_data_str(node),
"data": node_data_str(node.id, node.data),
}
)
else:
return {
json = {
"type": "unknown",
"data": node_data_str(node),
"data": node_data_str(node.id, node.data),
}
if node.metadata is not None:
json["metadata"] = node.metadata
return json


@dataclass
@@ -236,12 +250,17 @@ class Graph:
return uuid4().hex

def add_node(
self, data: Union[Type[BaseModel], RunnableType], id: Optional[str] = None
self,
data: Union[Type[BaseModel], RunnableType],
id: Optional[str] = None,
*,
metadata: Optional[Dict[str, Any]] = None,
) -> Node:
"""Add a node to the graph and return it."""
if id is not None and id in self.nodes:
raise ValueError(f"Node with id {id} already exists")
node = Node(id=id or self.next_id(), data=data)
id = id or self.next_id()
node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data))
self.nodes[node.id] = node
return node

@@ -285,25 +304,47 @@ class Graph:

# prefix each node
self.nodes.update(
{prefixed(k): Node(prefixed(k), v.data) for k, v in graph.nodes.items()}
{prefixed(k): v.copy(id=prefixed(k)) for k, v in graph.nodes.items()}
)
# prefix each edge's source and target
self.edges.extend(
[
Edge(
prefixed(edge.source),
prefixed(edge.target),
edge.data,
edge.conditional,
)
edge.copy(source=prefixed(edge.source), target=prefixed(edge.target))
for edge in graph.edges
]
)
# return (prefixed) first and last nodes of the subgraph
first, last = graph.first_node(), graph.last_node()
return (
Node(prefixed(first.id), first.data) if first else None,
Node(prefixed(last.id), last.data) if last else None,
first.copy(id=prefixed(first.id)) if first else None,
last.copy(id=prefixed(last.id)) if last else None,
)

def reid(self) -> Graph:
"""Return a new graph with all nodes re-identified,
using their unique, readable names where possible."""
node_labels = {node.id: node.name for node in self.nodes.values()}
node_label_counts = Counter(node_labels.values())

def _get_node_id(node_id: str) -> str:
label = node_labels[node_id]
if is_uuid(node_id) and node_label_counts[label] == 1:
return label
else:
return node_id

return Graph(
nodes={
_get_node_id(id): node.copy(id=_get_node_id(id))
for id, node in self.nodes.items()
},
edges=[
edge.copy(
source=_get_node_id(edge.source),
target=_get_node_id(edge.target),
)
for edge in self.edges
],
)

def first_node(self) -> Optional[Node]:
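`reid()` is what lets the drawing code below show readable labels: a node keeps its uuid id unless its name is unique across the graph, in which case the name becomes the id. A hedged sketch of the behavior; exact output depends on the runnables involved:

from langchain_core.runnables import RunnablePassthrough

chain = RunnablePassthrough() | RunnablePassthrough()
reided = chain.get_graph().reid()

# Uniquely named nodes are re-keyed by name; the two identically named
# Passthrough steps keep their uuid ids to stay unambiguous.
for node_id, node in reided.nodes.items():
    print(node_id, node.name)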
@@ -357,7 +398,7 @@ class Graph:
from langchain_core.runnables.graph_ascii import draw_ascii

return draw_ascii(
{node.id: node_data_str(node) for node in self.nodes.values()},
{node.id: node.name for node in self.nodes.values()},
self.edges,
)

@@ -388,9 +429,7 @@ class Graph:
) -> Union[bytes, None]:
from langchain_core.runnables.graph_png import PngDrawer

default_node_labels = {
node.id: node_data_str(node) for node in self.nodes.values()
}
default_node_labels = {node.id: node.name for node in self.nodes.values()}

return PngDrawer(
fontname,
@@ -415,19 +454,15 @@ class Graph:
) -> str:
from langchain_core.runnables.graph_mermaid import draw_mermaid

nodes = {node.id: node_data_str(node) for node in self.nodes.values()}

first_node = self.first_node()
first_label = node_data_str(first_node) if first_node is not None else None

last_node = self.last_node()
last_label = node_data_str(last_node) if last_node is not None else None
graph = self.reid()
first_node = graph.first_node()
last_node = graph.last_node()

return draw_mermaid(
nodes=nodes,
edges=self.edges,
first_node_label=first_label,
last_node_label=last_label,
nodes=graph.nodes,
edges=graph.edges,
first_node=first_node.id if first_node else None,
last_node=last_node.id if last_node else None,
with_styles=with_styles,
curve_style=curve_style,
node_colors=node_colors,
@@ -1,22 +1,23 @@
import base64
import re
from dataclasses import asdict
from typing import Dict, List, Optional, Tuple
from typing import Dict, List, Optional

from langchain_core.runnables.graph import (
CurveStyle,
Edge,
MermaidDrawMethod,
Node,
NodeColors,
)


def draw_mermaid(
nodes: Dict[str, str],
nodes: Dict[str, Node],
edges: List[Edge],
*,
first_node_label: Optional[str] = None,
last_node_label: Optional[str] = None,
first_node: Optional[str] = None,
last_node: Optional[str] = None,
with_styles: bool = True,
curve_style: CurveStyle = CurveStyle.LINEAR,
node_colors: NodeColors = NodeColors(),
@@ -49,15 +50,20 @@ def draw_mermaid(
# Node formatting templates
default_class_label = "default"
format_dict = {default_class_label: "{0}([{1}]):::otherclass"}
if first_node_label is not None:
format_dict[first_node_label] = "{0}[{0}]:::startclass"
if last_node_label is not None:
format_dict[last_node_label] = "{0}[{0}]:::endclass"
if first_node is not None:
format_dict[first_node] = "{0}[{0}]:::startclass"
if last_node is not None:
format_dict[last_node] = "{0}[{0}]:::endclass"

# Add nodes to the graph
for node in nodes.values():
node_label = format_dict.get(node, format_dict[default_class_label]).format(
_escape_node_label(node), node.split(":", 1)[-1]
for key, node in nodes.items():
label = node.name.split(":")[-1]
if node.metadata:
label = f"<strong>{label}</strong>\n" + "\n".join(
f"{key} = {value}" for key, value in node.metadata.items()
)
node_label = format_dict.get(key, format_dict[default_class_label]).format(
_escape_node_label(key), label
)
mermaid_graph += f"\t{node_label};\n"

@@ -74,9 +80,8 @@ def draw_mermaid(
if not subgraph and src_prefix and src_prefix == tgt_prefix:
mermaid_graph += f"\tsubgraph {src_prefix}\n"
subgraph = src_prefix
adjusted_edge = _adjust_mermaid_edge(edge=edge, nodes=nodes)

source, target = adjusted_edge
source, target = edge.source, edge.target

# Add BR every wrap_label_n_words words
if edge.data is not None:
@@ -117,17 +122,6 @@ def _escape_node_label(node_label: str) -> str:
return re.sub(r"[^a-zA-Z-_0-9]", "_", node_label)


def _adjust_mermaid_edge(
edge: Edge,
nodes: Dict[str, str],
) -> Tuple[str, str]:
"""Adjusts Mermaid edge to map conditional nodes to pure nodes."""
source_node_label = nodes.get(edge.source, edge.source)
target_node_label = nodes.get(edge.target, edge.target)

return source_node_label, target_node_label


def _generate_mermaid_graph_styles(node_colors: NodeColors) -> str:
"""Generates Mermaid graph styles for different node types."""
styles = ""
@@ -2,7 +2,7 @@

from __future__ import annotations

from typing import Any, Dict, List, Sequence
from typing import Any, Dict, List, Literal, Sequence, Union

from typing_extensions import NotRequired, TypedDict

@@ -37,7 +37,7 @@ class EventData(TypedDict, total=False):
"""


class StreamEvent(TypedDict):
class BaseStreamEvent(TypedDict):
"""Streaming event.

Schema of a streaming event which is produced from the astream_events method.
@@ -101,8 +101,6 @@ class StreamEvent(TypedDict):

Please see the documentation for `EventData` for more details.
"""
name: str
"""The name of the runnable that generated the event."""
run_id: str
"""A randomly generated ID to keep track of the execution of the given runnable.

@@ -128,11 +126,6 @@ class StreamEvent(TypedDict):

`.astream_events(..., {"metadata": {"foo": "bar"}})`.
"""
data: EventData
"""Event data.

The contents of the event data depend on the event type.
"""

parent_ids: Sequence[str]
"""A list of the parent IDs associated with this event.
@@ -146,3 +139,34 @@ class StreamEvent(TypedDict):

Only supported as of v2 of the astream events API. v1 will return an empty list.
"""


class StandardStreamEvent(BaseStreamEvent):
"""A standard stream event that follows LangChain convention for event data."""

data: EventData
"""Event data.

The contents of the event data depend on the event type.
"""
name: str
"""The name of the runnable that generated the event."""


class CustomStreamEvent(BaseStreamEvent):
"""A custom stream event created by the user.

.. versionadded:: 0.2.14
"""

# Overwrite the event field to be more specific.
event: Literal["on_custom_event"] # type: ignore[misc]
"""The event type."""
name: str
"""A user defined name for the event."""
data: Any
"""The data associated with the event. Free form and can be anything."""


StreamEvent = Union[StandardStreamEvent, CustomStreamEvent]
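Because `StreamEvent` is now a union, consumers of `astream_events` can branch on the `event` field; events dispatched with `adispatch_custom_event` surface as `on_custom_event`. A hedged consumption sketch, assuming some runnable `chain` that dispatches custom events internally:

async for event in chain.astream_events({"a": "1"}, version="v2"):
    if event["event"] == "on_custom_event":
        # CustomStreamEvent: free-form data under a user-defined name.
        print(event["name"], event["data"])
    elif event["event"].endswith("_stream"):
        # StandardStreamEvent: data follows the EventData convention.
        print(event["data"].get("chunk"))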
@@ -39,6 +39,7 @@ from typing import (
|
||||
Tuple,
|
||||
Type,
|
||||
Union,
|
||||
get_type_hints,
|
||||
)
|
||||
|
||||
from typing_extensions import Annotated, get_args, get_origin
|
||||

@@ -1218,3 +1219,76 @@ class BaseToolkit(BaseModel, ABC):
    @abstractmethod
    def get_tools(self) -> List[BaseTool]:
        """Get the tools in the toolkit."""


def _get_description_from_runnable(runnable: Runnable) -> str:
    """Generate a placeholder description of a runnable."""
    input_schema = runnable.input_schema.schema()
    return f"Takes {input_schema}."


def _get_schema_from_runnable_and_arg_types(
    runnable: Runnable,
    name: str,
    arg_types: Optional[Dict[str, Type]] = None,
) -> Type[BaseModel]:
    """Infer args_schema for tool."""
    if arg_types is None:
        try:
            arg_types = get_type_hints(runnable.InputType)
        except TypeError as e:
            raise TypeError(
                "Tool input must be str or dict. If dict, dict arguments must be "
                "typed. Either annotate types (e.g., with TypedDict) or pass "
                f"arg_types into `.as_tool` to specify. {str(e)}"
            )
    fields = {key: (key_type, Field(...)) for key, key_type in arg_types.items()}
    return create_model(name, **fields)  # type: ignore


def convert_runnable_to_tool(
    runnable: Runnable,
    name: Optional[str] = None,
    description: Optional[str] = None,
    arg_types: Optional[Dict[str, Type]] = None,
) -> BaseTool:
    """Convert a Runnable into a BaseTool."""
    description = description or _get_description_from_runnable(runnable)
    name = name or runnable.get_name()

    schema = runnable.input_schema.schema()
    if schema.get("type") == "string":
        return Tool(
            name=name,
            func=runnable.invoke,
            coroutine=runnable.ainvoke,
            description=description,
        )
    else:

        async def ainvoke_wrapper(
            callbacks: Optional[Callbacks] = None, **kwargs: Any
        ) -> Any:
            return await runnable.ainvoke(kwargs, config={"callbacks": callbacks})

        def invoke_wrapper(callbacks: Optional[Callbacks] = None, **kwargs: Any) -> Any:
            return runnable.invoke(kwargs, config={"callbacks": callbacks})

        if (
            arg_types is None
            and schema.get("type") == "object"
            and schema.get("properties")
        ):
            args_schema = runnable.input_schema
        else:
            args_schema = _get_schema_from_runnable_and_arg_types(
                runnable, name, arg_types=arg_types
            )

        return StructuredTool.from_function(
            name=name,
            func=invoke_wrapper,
            coroutine=ainvoke_wrapper,
            description=description,
            args_schema=args_schema,
        )
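
A short sketch of the two conversion paths, not part of the diff (assuming convert_runnable_to_tool is exported from langchain_core.tools as added here; the runnables are illustrative):

from langchain_core.runnables import RunnableLambda
from langchain_core.tools import convert_runnable_to_tool

# Dict input with explicit arg_types -> StructuredTool with a generated schema.
adder = RunnableLambda(lambda d: str(d["a"] + d["b"]))
adder_tool = convert_runnable_to_tool(
    adder,
    name="adder",
    description="Add two integers.",
    arg_types={"a": int, "b": int},
)
print(adder_tool.invoke({"a": 1, "b": 2}))  # -> "3"

# String input -> plain Tool wrapping invoke/ainvoke directly.
shout = RunnableLambda(lambda s: s.upper()).with_types(input_type=str)
shout_tool = convert_runnable_to_tool(shout, name="shout", description="Uppercase a string.")
print(shout_tool.invoke("hi"))  # -> "HI"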

@@ -28,7 +28,12 @@ from langchain_core.outputs import (
    GenerationChunk,
    LLMResult,
)
-from langchain_core.runnables.schema import EventData, StreamEvent
+from langchain_core.runnables.schema import (
+    CustomStreamEvent,
+    EventData,
+    StandardStreamEvent,
+    StreamEvent,
+)
from langchain_core.runnables.utils import (
    Input,
    Output,

@@ -161,7 +166,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
            return
        if tap is sentinel:
            # if we are the first to tap, issue stream events
-            event: StreamEvent = {
+            event: StandardStreamEvent = {
                "event": f"on_{run_info['run_type']}_stream",
                "run_id": str(run_id),
                "name": run_info["name"],
@@ -203,7 +208,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
            return
        if tap is sentinel:
            # if we are the first to tap, issue stream events
-            event: StreamEvent = {
+            event: StandardStreamEvent = {
                "event": f"on_{run_info['run_type']}_stream",
                "run_id": str(run_id),
                "name": run_info["name"],
@@ -341,6 +346,28 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
            run_type,
        )

+    async def on_custom_event(
+        self,
+        name: str,
+        data: Any,
+        *,
+        run_id: UUID,
+        tags: Optional[List[str]] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Generate a custom astream event."""
+        event = CustomStreamEvent(
+            event="on_custom_event",
+            run_id=str(run_id),
+            name=name,
+            tags=tags or [],
+            metadata=metadata or {},
+            data=data,
+            parent_ids=self._get_parent_ids(run_id),
+        )
+        self._send(event, name)

    async def on_llm_new_token(
        self,
        token: str,
@@ -678,7 +705,7 @@ async def _astream_events_implementation_v1(
    exclude_types: Optional[Sequence[str]] = None,
    exclude_tags: Optional[Sequence[str]] = None,
    **kwargs: Any,
-) -> AsyncIterator[StreamEvent]:
+) -> AsyncIterator[StandardStreamEvent]:
    from langchain_core.runnables import ensure_config
    from langchain_core.runnables.utils import _RootEventFilter
    from langchain_core.tracers.log_stream import (
@@ -733,7 +760,7 @@ async def _astream_events_implementation_v1(
            encountered_start_event = True
            state = run_log.state.copy()

-            event = StreamEvent(
+            event = StandardStreamEvent(
                event=f"on_{state['type']}_start",
                run_id=state["id"],
                name=root_name,
@@ -798,7 +825,7 @@ async def _astream_events_implementation_v1(
                # And this avoids duplicates as well!
                log_entry["streamed_output"] = []

-                yield StreamEvent(
+                yield StandardStreamEvent(
                    event=f"on_{log_entry['type']}_{event_type}",
                    name=log_entry["name"],
                    run_id=log_entry["id"],
@@ -824,7 +851,7 @@ async def _astream_events_implementation_v1(
                # Clean up the stream, we don't need it anymore.
                state["streamed_output"] = []

-                event = StreamEvent(
+                event = StandardStreamEvent(
                    event=f"on_{state['type']}_stream",
                    run_id=state["id"],
                    tags=root_tags,
@@ -839,7 +866,7 @@ async def _astream_events_implementation_v1(
    state = run_log.state

    # Finally yield the end event for the root runnable.
-    event = StreamEvent(
+    event = StandardStreamEvent(
        event=f"on_{state['type']}_end",
        name=root_name,
        run_id=state["id"],
@@ -866,7 +893,7 @@ async def _astream_events_implementation_v2(
    exclude_types: Optional[Sequence[str]] = None,
    exclude_tags: Optional[Sequence[str]] = None,
    **kwargs: Any,
-) -> AsyncIterator[StreamEvent]:
+) -> AsyncIterator[StandardStreamEvent]:
    """Implementation of the astream events API for V2 runnables."""
    from langchain_core.callbacks.base import BaseCallbackManager
    from langchain_core.runnables import ensure_config
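
For handler authors, the new hook is an ordinary async callback method. A minimal sketch, not part of the diff (the handler and chain are illustrative):

from typing import Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.callbacks.manager import adispatch_custom_event
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.config import RunnableConfig


class ProgressHandler(AsyncCallbackHandler):
    async def on_custom_event(
        self, name: str, data: Any, *, run_id: UUID, **kwargs: Any
    ) -> None:
        print(f"{name}: {data} (run {run_id})")


@RunnableLambda  # type: ignore[arg-type]
async def step(x: int, config: RunnableConfig) -> int:
    await adispatch_custom_event("halfway", {"x": x}, config=config)
    return x * 2


# Somewhere in async code:
#     await step.ainvoke(3, {"callbacks": [ProgressHandler()]})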

@@ -3,8 +3,8 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional


-def merge_dicts(left: Dict[str, Any], right: Dict[str, Any]) -> Dict[str, Any]:
-    """Merge two dicts, handling specific scenarios where a key exists in both
+def merge_dicts(left: Dict[str, Any], *others: Dict[str, Any]) -> Dict[str, Any]:
+    """Merge many dicts, handling specific scenarios where a key exists in both
    dictionaries but has a value of None in 'left'. In such cases, the method uses the
    value from 'right' for that key in the merged dictionary.

@@ -16,57 +16,82 @@ def merge_dicts(left: Dict[str, Any], right: Dict[str, Any]) -> Dict[str, Any]:
    resulting in merged = {"function_call": {"arguments": "{\n"}}.
    """
    merged = left.copy()
-    for right_k, right_v in right.items():
-        if right_k not in merged:
-            merged[right_k] = right_v
-        elif right_v is not None and merged[right_k] is None:
-            merged[right_k] = right_v
-        elif right_v is None:
-            continue
-        elif type(merged[right_k]) is not type(right_v):
-            raise TypeError(
-                f'additional_kwargs["{right_k}"] already exists in this message,'
-                " but with a different type."
-            )
-        elif isinstance(merged[right_k], str):
-            merged[right_k] += right_v
-        elif isinstance(merged[right_k], dict):
-            merged[right_k] = merge_dicts(merged[right_k], right_v)
-        elif isinstance(merged[right_k], list):
-            merged[right_k] = merge_lists(merged[right_k], right_v)
-        elif merged[right_k] == right_v:
-            continue
-        else:
-            raise TypeError(
-                f"Additional kwargs key {right_k} already exists in left dict and "
-                f"value has unsupported type {type(merged[right_k])}."
-            )
+    for right in others:
+        for right_k, right_v in right.items():
+            if right_k not in merged:
+                merged[right_k] = right_v
+            elif right_v is not None and merged[right_k] is None:
+                merged[right_k] = right_v
+            elif right_v is None:
+                continue
+            elif type(merged[right_k]) is not type(right_v):
+                raise TypeError(
+                    f'additional_kwargs["{right_k}"] already exists in this message,'
+                    " but with a different type."
+                )
+            elif isinstance(merged[right_k], str):
+                merged[right_k] += right_v
+            elif isinstance(merged[right_k], dict):
+                merged[right_k] = merge_dicts(merged[right_k], right_v)
+            elif isinstance(merged[right_k], list):
+                merged[right_k] = merge_lists(merged[right_k], right_v)
+            elif merged[right_k] == right_v:
+                continue
+            else:
+                raise TypeError(
+                    f"Additional kwargs key {right_k} already exists in left dict and "
+                    f"value has unsupported type {type(merged[right_k])}."
+                )
    return merged
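
A quick sketch of the variadic merge, not part of the diff (the _merge module is private; the inputs are illustrative chunks of a streamed function call):

from langchain_core.utils._merge import merge_dicts

left = {"function_call": {"arguments": None}}
middle = {"function_call": {"arguments": "{\n"}}
right = {"function_call": {"arguments": '  "x": 1\n}'}}

# None on the left is replaced, then string fragments are concatenated.
print(merge_dicts(left, middle, right))
# {'function_call': {'arguments': '{\n  "x": 1\n}'}}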

-def merge_lists(left: Optional[List], right: Optional[List]) -> Optional[List]:
-    """Add two lists, handling None."""
-    if left is None and right is None:
-        return None
-    elif left is None or right is None:
-        return left or right
-    else:
-        merged = left.copy()
-        for e in right:
-            if isinstance(e, dict) and "index" in e and isinstance(e["index"], int):
-                to_merge = [
-                    i
-                    for i, e_left in enumerate(merged)
-                    if e_left["index"] == e["index"]
-                ]
-                if to_merge:
-                    # If a top-level "type" has been set for a chunk, it should no
-                    # longer be overridden by the "type" field in future chunks.
-                    if "type" in merged[to_merge[0]] and "type" in e:
-                        e.pop("type")
-                    merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], e)
-                else:
-                    merged = merged + [e]
-            else:
-                merged = merged + [e]
-    return merged
+def merge_lists(left: Optional[List], *others: Optional[List]) -> Optional[List]:
+    """Add many lists, handling None."""
+    merged = left.copy() if left is not None else None
+    for other in others:
+        if other is None:
+            continue
+        elif merged is None:
+            merged = other.copy()
+        else:
+            for e in other:
+                if isinstance(e, dict) and "index" in e and isinstance(e["index"], int):
+                    to_merge = [
+                        i
+                        for i, e_left in enumerate(merged)
+                        if e_left["index"] == e["index"]
+                    ]
+                    if to_merge:
+                        # If a top-level "type" has been set for a chunk, it should no
+                        # longer be overridden by the "type" field in future chunks.
+                        if "type" in merged[to_merge[0]] and "type" in e:
+                            e.pop("type")
+                        merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], e)
+                    else:
+                        merged.append(e)
+                else:
+                    merged.append(e)
+    return merged
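
The "index" handling is what stitches streamed tool-call chunks back together. A small sketch, not part of the diff (illustrative inputs):

from langchain_core.utils._merge import merge_lists

chunks = merge_lists(
    [{"index": 0, "text": "Hello"}],
    [{"index": 0, "text": ", world"}],  # same index -> merged into element 0
    [{"index": 1, "text": "!"}],        # new index -> appended
)
print(chunks)
# [{'index': 0, 'text': 'Hello, world'}, {'index': 1, 'text': '!'}]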

def merge_obj(left: Any, right: Any) -> Any:
    if left is None or right is None:
        return left if left is not None else right
    elif type(left) is not type(right):
        raise TypeError(
            f"left and right are of different types. Left type: {type(left)}. Right "
            f"type: {type(right)}."
        )
    elif isinstance(left, str):
        return left + right
    elif isinstance(left, dict):
        return merge_dicts(left, right)
    elif isinstance(left, list):
        return merge_lists(left, right)
    elif left == right:
        return left
    else:
        raise ValueError(
            f"Unable to merge {left=} and {right=}. Both must be of type str, dict, or "
            f"list, or else be two equal objects."
        )

42  libs/core/langchain_core/utils/curry.py  Normal file
@@ -0,0 +1,42 @@
import asyncio
import inspect
from functools import wraps
from typing import Any, Callable


def curry(func: Callable[..., Any], **curried_kwargs: Any) -> Callable[..., Any]:
    """Util that wraps a function and partially applies kwargs to it.
    Returns a new function whose signature omits the curried variables.

    Args:
        func: The function to curry.
        curried_kwargs: Arguments to apply to the function.

    Returns:
        A new function with curried arguments applied.

    .. versionadded:: 0.2.18
    """

    @wraps(func)
    async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
        new_kwargs = {**curried_kwargs, **kwargs}
        return await func(*args, **new_kwargs)

    @wraps(func)
    def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
        new_kwargs = {**curried_kwargs, **kwargs}
        return func(*args, **new_kwargs)

    sig = inspect.signature(func)
    # Create a new signature without the curried parameters
    new_params = [p for name, p in sig.parameters.items() if name not in curried_kwargs]

    if asyncio.iscoroutinefunction(func):
        async_wrapper = wraps(func)(async_wrapper)
        setattr(async_wrapper, "__signature__", sig.replace(parameters=new_params))
        return async_wrapper
    else:
        sync_wrapper = wraps(func)(sync_wrapper)
        setattr(sync_wrapper, "__signature__", sig.replace(parameters=new_params))
        return sync_wrapper
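
A usage sketch for the new utility, not part of the diff (the example function is illustrative):

import inspect

from langchain_core.utils.curry import curry


def send(message: str, *, channel: str, urgent: bool = False) -> str:
    return f"[{channel}] {'URGENT: ' if urgent else ''}{message}"


send_alerts = curry(send, channel="alerts")
print(send_alerts("disk almost full", urgent=True))  # [alerts] URGENT: disk almost full

# The curried parameter is removed from the reported signature.
print(inspect.signature(send_alerts))  # (message: str, *, urgent: bool = False) -> str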

@@ -58,7 +58,7 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:
        pass

    # Initialize variables.
-    new_s = ""
+    new_chars = []
    stack = []
    is_inside_string = False
    escaped = False
@@ -90,29 +90,27 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:
            return None

        # Append the processed character to the new string.
-        new_s += char
+        new_chars.append(char)

    # If we're still inside a string at the end of processing,
    # we need to close the string.
    if is_inside_string:
-        new_s += '"'
+        new_chars.append('"')

    # Reverse the stack to get the closing characters.
    stack.reverse()

    # Try to parse mods of string until we succeed or run out of characters.
-    while new_s:
-        final_s = new_s
-
-        # Close any remaining open structures in the reverse
-        # order that they were opened.
-        for closing_char in reversed(stack):
-            final_s += closing_char
-
+    while new_chars:
        # Attempt to parse the modified string as JSON.
        try:
-            return json.loads(final_s, strict=strict)
+            return json.loads("".join(new_chars + stack), strict=strict)
        except json.JSONDecodeError:
            # If we still can't parse the string as JSON,
            # try removing the last character
-            new_s = new_s[:-1]
+            new_chars.pop()

    # If we got here, we ran out of characters to remove
    # and still couldn't parse the string as JSON, so return the parse error
@@ -120,6 +118,9 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:
    return json.loads(s, strict=strict)
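
Behavior is unchanged; only the string building avoids repeated concatenation. An illustrative call, not part of the diff:

from langchain_core.utils.json import parse_partial_json

# A truncated streaming fragment: the parser closes the open string and braces.
fragment = '{"name": "search", "args": {"query": "weath'
print(parse_partial_json(fragment))
# {'name': 'search', 'args': {'query': 'weath'}}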

_json_markdown_re = re.compile(r"```(json)?(.*)", re.DOTALL)


def parse_json_markdown(
    json_string: str, *, parser: Callable[[str], Any] = parse_partial_json
) -> dict:
@@ -136,7 +137,7 @@ def parse_json_markdown(
        return _parse_json(json_string, parser=parser)
    except json.JSONDecodeError:
        # Try to find JSON string within triple backticks
-        match = re.search(r"```(json)?(.*)", json_string, re.DOTALL)
+        match = _json_markdown_re.search(json_string)

        # If no match found, assume the entire string is a JSON string
        if match is None:
@@ -147,11 +148,14 @@ def parse_json_markdown(
    return _parse_json(json_str, parser=parser)


_json_strip_chars = " \n\r\t`"


def _parse_json(
    json_str: str, *, parser: Callable[[str], Any] = parse_partial_json
) -> dict:
-    # Strip whitespace and newlines from the start and end
-    json_str = json_str.strip().strip("`")
+    # Strip whitespace, newlines, and backticks from the start and end
+    json_str = json_str.strip(_json_strip_chars)

    # handle newlines and other special characters inside the returned value
    json_str = _custom_parser(json_str)
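
An illustrative call showing the markdown path, not part of the diff:

from langchain_core.utils.json import parse_json_markdown

text = 'Here is the plan:\n```json\n{"steps": ["a", "b"]}\n```'
print(parse_json_markdown(text))
# {'steps': ['a', 'b']}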

6  libs/core/poetry.lock  generated
@@ -1233,13 +1233,13 @@ url = "../text-splitters"

[[package]]
name = "langsmith"
-version = "0.1.84"
+version = "0.1.85"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
-    {file = "langsmith-0.1.84-py3-none-any.whl", hash = "sha256:01f3c6390dba26c583bac8dd0e551ce3d0509c7f55cad714db0b5c8d36e4c7ff"},
-    {file = "langsmith-0.1.84.tar.gz", hash = "sha256:5220c0439838b9a5bd320fd3686be505c5083dcee22d2452006c23891153bea1"},
+    {file = "langsmith-0.1.85-py3-none-any.whl", hash = "sha256:c1f94384f10cea96f7b4d33fd3db7ec180c03c7468877d50846f881d2017ff94"},
+    {file = "langsmith-0.1.85.tar.gz", hash = "sha256:acff31f9e53efa48586cf8e32f65625a335c74d7c4fa306d1655ac18452296f6"},
]

[package.dependencies]

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "langchain-core"
-version = "0.2.12"
+version = "0.2.14"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
@@ -38,6 +38,11 @@ python = ">=3.12.4"
[tool.ruff.lint]
select = [ "E", "F", "I", "T201",]

+[tool.ruff.lint.per-file-ignores]
+"tests/unit_tests/prompts/test_chat.py" = ["E501"]
+"tests/unit_tests/runnables/test_runnable.py" = ["E501"]
+"tests/unit_tests/runnables/test_graph.py" = ["E501"]

[tool.coverage.run]
omit = [ "tests/*",]

@@ -0,0 +1,161 @@
import sys
import uuid
from typing import Any, Dict, List, Optional
from uuid import UUID

import pytest

from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.callbacks.manager import (
    adispatch_custom_event,
    dispatch_custom_event,
)
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.config import RunnableConfig


class AsyncCustomCallbackHandler(AsyncCallbackHandler):
    def __init__(self) -> None:
        self.events: List[Any] = []

    async def on_custom_event(
        self,
        name: str,
        data: Any,
        *,
        run_id: UUID,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        assert kwargs == {}
        self.events.append(
            (
                name,
                data,
                run_id,
                tags,
                metadata,
            )
        )


def test_custom_event_root_dispatch() -> None:
    """Test dispatching an adhoc event at the root, outside of any run."""
    # This just tests that nothing breaks on the path.
    # It shouldn't do anything at the moment, since the tracer isn't configured
    # to handle adhoc events.
    # Expected behavior is that the event cannot be dispatched
    with pytest.raises(RuntimeError):
        dispatch_custom_event("event1", {"x": 1})


async def test_async_custom_event_root_dispatch() -> None:
    """Test dispatching an adhoc event at the root, outside of any run."""
    # This just tests that nothing breaks on the path.
    # It shouldn't do anything at the moment, since the tracer isn't configured
    # to handle adhoc events.
    # Expected behavior is that the event cannot be dispatched
    with pytest.raises(RuntimeError):
        await adispatch_custom_event("event1", {"x": 1})


IS_GTE_3_11 = sys.version_info >= (3, 11)


@pytest.mark.skipif(not IS_GTE_3_11, reason="Requires Python >=3.11")
async def test_async_custom_event_implicit_config() -> None:
    """Test dispatch without passing config explicitly."""
    callback = AsyncCustomCallbackHandler()

    run_id = uuid.UUID(int=7)

    # Typing not working well with RunnableLambda when used as
    # a decorator for async functions
    @RunnableLambda  # type: ignore[arg-type]
    async def foo(x: int, config: RunnableConfig) -> int:
        await adispatch_custom_event("event1", {"x": x})
        await adispatch_custom_event("event2", {"x": x})
        return x

    await foo.ainvoke(
        1,  # type: ignore[arg-type]
        {"callbacks": [callback], "run_id": run_id},
    )

    assert callback.events == [
        ("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
        ("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
    ]


async def test_async_callback_manager() -> None:
    """Test async callback manager."""

    callback = AsyncCustomCallbackHandler()

    run_id = uuid.UUID(int=7)

    # Typing not working well with RunnableLambda when used as
    # a decorator for async functions
    @RunnableLambda  # type: ignore[arg-type]
    async def foo(x: int, config: RunnableConfig) -> int:
        await adispatch_custom_event("event1", {"x": x}, config=config)
        await adispatch_custom_event("event2", {"x": x}, config=config)
        return x

    await foo.ainvoke(
        1,  # type: ignore[arg-type]
        {"callbacks": [callback], "run_id": run_id},
    )

    assert callback.events == [
        ("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
        ("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
    ]


def test_sync_callback_manager() -> None:
    """Test sync callback manager."""

    class CustomCallbackManager(BaseCallbackHandler):
        def __init__(self) -> None:
            self.events: List[Any] = []

        def on_custom_event(
            self,
            name: str,
            data: Any,
            *,
            run_id: UUID,
            tags: Optional[List[str]] = None,
            metadata: Optional[Dict[str, Any]] = None,
            **kwargs: Any,
        ) -> None:
            assert kwargs == {}
            self.events.append(
                (
                    name,
                    data,
                    run_id,
                    tags,
                    metadata,
                )
            )

    callback = CustomCallbackManager()

    run_id = uuid.UUID(int=7)

    @RunnableLambda
    def foo(x: int, config: RunnableConfig) -> int:
        dispatch_custom_event("event1", {"x": x})
        dispatch_custom_event("event2", {"x": x}, config=config)
        return x

    foo.invoke(1, {"callbacks": [callback], "run_id": run_id})

    assert callback.events == [
        ("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
        ("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
    ]

@@ -31,6 +31,8 @@ EXPECTED_ALL = [
    "StdOutCallbackHandler",
    "StreamingStdOutCallbackHandler",
    "FileCallbackHandler",
+   "adispatch_custom_event",
+   "dispatch_custom_event",
]

1184  libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr  Normal file
File diff suppressed because it is too large

@@ -4,6 +4,7 @@ from pathlib import Path
from typing import Any, List, Union

import pytest
+from syrupy import SnapshotAssertion

from langchain_core._api.deprecation import (
    LangChainPendingDeprecationWarning,
@@ -789,7 +790,7 @@ async def test_messages_prompt_accepts_list() -> None:
    await prompt.ainvoke([("user", "Hi there")])  # type: ignore


-def test_chat_input_schema() -> None:
+def test_chat_input_schema(snapshot: SnapshotAssertion) -> None:
    prompt_all_required = ChatPromptTemplate.from_messages(
        messages=[MessagesPlaceholder("history", optional=False), ("user", "${input}")]
    )
@@ -797,601 +798,10 @@ def test_chat_input_schema() -> None:
    prompt_all_required.optional_variables == {"history"}
    with pytest.raises(ValidationError):
        prompt_all_required.input_schema(input="")
-    assert prompt_all_required.input_schema.schema() == {
-        "title": "PromptInput",
-        "type": "object",
-        "properties": {
-            "history": {
-                "title": "History",
-                "type": "array",
-                "items": {
-                    "anyOf": [
-                        {"$ref": "#/definitions/AIMessage"},
-                        {"$ref": "#/definitions/HumanMessage"},
-                        {"$ref": "#/definitions/ChatMessage"},
-                        {"$ref": "#/definitions/SystemMessage"},
-                        {"$ref": "#/definitions/FunctionMessage"},
-                        {"$ref": "#/definitions/ToolMessage"},
-                    ]
-                },
-            },
-            "input": {"title": "Input", "type": "string"},
-        },
-        "required": ["history", "input"],
-        "definitions": {
-            # ~550 removed lines of inline JSON schema for ToolCall,
-            # InvalidToolCall, UsageMetadata, AIMessage, HumanMessage,
-            # ChatMessage, SystemMessage, FunctionMessage, and ToolMessage,
-            # now captured by the snapshot below.
-        },
-    }
+    assert prompt_all_required.input_schema.schema() == snapshot(name="required")
    prompt_optional = ChatPromptTemplate.from_messages(
        messages=[MessagesPlaceholder("history", optional=True), ("user", "${input}")]
    )
    prompt_optional.input_variables == {"history", "input"}
    prompt_optional.input_schema(input="")  # won't raise error
-    prompt_optional.input_schema.schema() == {
-        "title": "PromptInput",
-        "type": "object",
-        "properties": {
-            "input": {"title": "Input", "type": "string"},
-            "history": {
-                "title": "History",
-                "type": "array",
-                "items": {
-                    "anyOf": [
-                        {"$ref": "#/definitions/AIMessage"},
-                        {"$ref": "#/definitions/HumanMessage"},
-                        {"$ref": "#/definitions/ChatMessage"},
-                        {"$ref": "#/definitions/SystemMessage"},
-                        {"$ref": "#/definitions/FunctionMessage"},
-                        {"$ref": "#/definitions/ToolMessage"},
-                    ]
-                },
-            },
-        },
-        "required": ["input"],
-        "definitions": {
-            # Same ~550 removed lines of inline message schemas as above,
-            # now captured by the "partial" snapshot.
-        },
-    }
+    prompt_optional.input_schema.schema() == snapshot(name="partial")

@@ -58,7 +58,7 @@
        "base",
        "RunnableLambda"
      ],
-     "name": "RunnableLambda"
+     "name": "Lambda"
    }
  }
],
@@ -458,7 +458,7 @@
        "runnable",
        "RunnableWithFallbacks"
      ],
-     "name": "RunnableWithFallbacks"
+     "name": "WithFallbacks"
    }
  },
  {
@@ -498,7 +498,7 @@
        "base",
        "RunnableLambda"
      ],
-     "name": "RunnableLambda"
+     "name": "Lambda"
    }
  },
  {
@@ -511,7 +511,7 @@
        "runnable",
        "RunnableWithFallbacks"
      ],
-     "name": "RunnableWithFallbacks"
+     "name": "WithFallbacks"
    }
  },
  {
@@ -589,7 +589,7 @@
        "runnable",
        "RunnablePassthrough"
      ],
-     "name": "RunnablePassthrough"
+     "name": "Passthrough"
    }
  },
  {
@@ -635,7 +635,7 @@
        "runnable",
        "RunnablePassthrough"
      ],
-     "name": "RunnablePassthrough"
+     "name": "Passthrough"
    }
  }
],
@@ -716,7 +716,7 @@
        "runnable",
        "RunnableWithFallbacks"
      ],
-     "name": "RunnableWithFallbacks"
+     "name": "WithFallbacks"
    }
  },
  {
@@ -756,7 +756,7 @@
        "runnable",
        "RunnablePassthrough"
      ],
-     "name": "RunnablePassthrough"
+     "name": "Passthrough"
    }
  },
  {
@@ -769,7 +769,7 @@
        "runnable",
        "RunnableWithFallbacks"
      ],
-     "name": "RunnableWithFallbacks"
+     "name": "WithFallbacks"
    }
  },
  {
@@ -938,7 +938,7 @@
        "runnable",
        "RunnableWithFallbacks"
      ],
-     "name": "RunnableWithFallbacks"
+     "name": "WithFallbacks"
    }
  },
  {
@@ -1152,7 +1152,7 @@
        "runnable",
        "RunnableWithFallbacks"
      ],
-     "name": "RunnableWithFallbacks"
+     "name": "WithFallbacks"
    }
  },
  {

@@ -36,7 +36,8 @@
    graph TD;
        PromptInput[PromptInput]:::startclass;
        PromptTemplate([PromptTemplate]):::otherclass;
-       FakeListLLM([FakeListLLM]):::otherclass;
+       FakeListLLM([<strong>FakeListLLM</strong>
+       key = 2]):::otherclass;
        CommaSeparatedListOutputParser([CommaSeparatedListOutputParser]):::otherclass;
        CommaSeparatedListOutputParserOutput[CommaSeparatedListOutputParserOutput]:::endclass;
        PromptInput --> PromptTemplate;
@@ -98,6 +99,897 @@
        +--------------------------------+
    '''
# ---
# name: test_graph_sequence_map[graph_no_schemas]
  dict({
    'edges': list([
      dict({'source': 0, 'target': 1}),
      dict({'source': 1, 'target': 2}),
      dict({'source': 3, 'target': 5}),
      dict({'source': 5, 'target': 4}),
      dict({'source': 6, 'target': 8}),
      dict({'source': 8, 'target': 7}),
      dict({'source': 6, 'target': 9}),
      dict({'source': 9, 'target': 7}),
      dict({'source': 3, 'target': 6}),
      dict({'source': 7, 'target': 4}),
      dict({'source': 2, 'target': 3}),
    ]),
    'nodes': list([
      dict({'data': 'PromptInput', 'id': 0, 'type': 'schema'}),
      dict({
        'data': dict({
          'id': list(['langchain', 'prompts', 'prompt', 'PromptTemplate']),
          'name': 'PromptTemplate',
        }),
        'id': 1,
        'type': 'runnable',
      }),
      dict({
        'data': dict({
          'id': list(['langchain_core', 'language_models', 'fake', 'FakeListLLM']),
          'name': 'FakeListLLM',
        }),
        'id': 2,
        'type': 'runnable',
      }),
      dict({'data': 'Parallel<as_list,as_str>Input', 'id': 3, 'type': 'schema'}),
      dict({'data': 'Parallel<as_list,as_str>Output', 'id': 4, 'type': 'schema'}),
      dict({
        'data': dict({
          'id': list(['langchain', 'output_parsers', 'list', 'CommaSeparatedListOutputParser']),
          'name': 'CommaSeparatedListOutputParser',
        }),
        'id': 5,
        'type': 'runnable',
      }),
      dict({'data': 'conditional_str_parser_input', 'id': 6, 'type': 'schema'}),
      dict({'data': 'conditional_str_parser_output', 'id': 7, 'type': 'schema'}),
      dict({
        'data': dict({
          'id': list(['langchain', 'schema', 'output_parser', 'StrOutputParser']),
          'name': 'StrOutputParser',
        }),
        'id': 8,
        'type': 'runnable',
      }),
      dict({
        'data': dict({
          'id': list(['langchain_core', 'output_parsers', 'xml', 'XMLOutputParser']),
          'name': 'XMLOutputParser',
        }),
        'id': 9,
        'type': 'runnable',
      }),
    ]),
  })
# ---
# name: test_graph_sequence_map[graph_with_schema]
  dict({
    'edges': list([
      dict({'source': 0, 'target': 1}),
      dict({'source': 1, 'target': 2}),
      dict({'source': 3, 'target': 5}),
      dict({'source': 5, 'target': 4}),
      dict({'source': 6, 'target': 8}),
      dict({'source': 8, 'target': 7}),
      dict({'source': 6, 'target': 9}),
      dict({'source': 9, 'target': 7}),
      dict({'source': 3, 'target': 6}),
      dict({'source': 7, 'target': 4}),
      dict({'source': 2, 'target': 3}),
    ]),
    'nodes': list([
      dict({
        'data': dict({
          'properties': dict({'name': dict({'title': 'Name', 'type': 'string'})}),
          'required': list(['name']),
          'title': 'PromptInput',
          'type': 'object',
        }),
        'id': 0,
        'type': 'schema',
      }),
      dict({
        'data': dict({
          'id': list(['langchain', 'prompts', 'prompt', 'PromptTemplate']),
          'name': 'PromptTemplate',
        }),
        'id': 1,
        'type': 'runnable',
      }),
      dict({
        'data': dict({
          'id': list(['langchain_core', 'language_models', 'fake', 'FakeListLLM']),
          'name': 'FakeListLLM',
        }),
        'id': 2,
        'type': 'runnable',
      }),
      dict({
        'data': dict({
          'anyOf': list([
            dict({'type': 'string'}),
            dict({'$ref': '#/definitions/AIMessage'}),
            dict({'$ref': '#/definitions/HumanMessage'}),
            dict({'$ref': '#/definitions/ChatMessage'}),
            dict({'$ref': '#/definitions/SystemMessage'}),
            dict({'$ref': '#/definitions/FunctionMessage'}),
            dict({'$ref': '#/definitions/ToolMessage'}),
          ]),
          'definitions': dict({
            # Full JSON schemas for AIMessage, ChatMessage, FunctionMessage,
            # HumanMessage, InvalidToolCall, SystemMessage (and onward),
            # matching the message schema definitions shown earlier in this
            # diff; the excerpt ends partway through SystemMessage.
|
||||
'title': 'Response Metadata',
|
||||
'type': 'object',
|
||||
}),
|
||||
'type': dict({
|
||||
'default': 'system',
|
||||
'enum': list([
|
||||
'system',
|
||||
]),
|
||||
'title': 'Type',
|
||||
'type': 'string',
|
||||
}),
|
||||
}),
|
||||
'required': list([
|
||||
'content',
|
||||
]),
|
||||
'title': 'SystemMessage',
|
||||
'type': 'object',
|
||||
}),
|
||||
'ToolCall': dict({
|
||||
'properties': dict({
|
||||
'args': dict({
|
||||
'title': 'Args',
|
||||
'type': 'object',
|
||||
}),
|
||||
'id': dict({
|
||||
'title': 'Id',
|
||||
'type': 'string',
|
||||
}),
|
||||
'name': dict({
|
||||
'title': 'Name',
|
||||
'type': 'string',
|
||||
}),
|
||||
}),
|
||||
'required': list([
|
||||
'name',
|
||||
'args',
|
||||
'id',
|
||||
]),
|
||||
'title': 'ToolCall',
|
||||
'type': 'object',
|
||||
}),
|
||||
'ToolMessage': dict({
|
||||
'description': '''
|
||||
Message for passing the result of executing a tool back to a model.
|
||||
|
||||
ToolMessages contain the result of a tool invocation. Typically, the result
|
||||
is encoded inside the `content` field.
|
||||
|
||||
Example: A ToolMessage representing a result of 42 from a tool call with id
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_core.messages import ToolMessage
|
||||
|
||||
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
|
||||
|
||||
Example: A ToolMessage where only part of the tool output is sent to the model
|
||||
and the full output is passed in to raw_output.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from langchain_core.messages import ToolMessage
|
||||
|
||||
tool_output = {
|
||||
"stdout": "From the graph we can see that the correlation between x and y is ...",
|
||||
"stderr": None,
|
||||
"artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
|
||||
}
|
||||
|
||||
ToolMessage(
|
||||
content=tool_output["stdout"],
|
||||
raw_output=tool_output,
|
||||
tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
|
||||
)
|
||||
|
||||
The tool_call_id field is used to associate the tool call request with the
|
||||
tool call response. This is useful in situations where a chat model is able
|
||||
to request multiple tool calls in parallel.
|
||||
''',
|
||||
'properties': dict({
|
||||
'additional_kwargs': dict({
|
||||
'title': 'Additional Kwargs',
|
||||
'type': 'object',
|
||||
}),
|
||||
'content': dict({
|
||||
'anyOf': list([
|
||||
dict({
|
||||
'type': 'string',
|
||||
}),
|
||||
dict({
|
||||
'items': dict({
|
||||
'anyOf': list([
|
||||
dict({
|
||||
'type': 'string',
|
||||
}),
|
||||
dict({
|
||||
'type': 'object',
|
||||
}),
|
||||
]),
|
||||
}),
|
||||
'type': 'array',
|
||||
}),
|
||||
]),
|
||||
'title': 'Content',
|
||||
}),
|
||||
'id': dict({
|
||||
'title': 'Id',
|
||||
'type': 'string',
|
||||
}),
|
||||
'name': dict({
|
||||
'title': 'Name',
|
||||
'type': 'string',
|
||||
}),
|
||||
'raw_output': dict({
|
||||
'title': 'Raw Output',
|
||||
}),
|
||||
'response_metadata': dict({
|
||||
'title': 'Response Metadata',
|
||||
'type': 'object',
|
||||
}),
|
||||
'tool_call_id': dict({
|
||||
'title': 'Tool Call Id',
|
||||
'type': 'string',
|
||||
}),
|
||||
'type': dict({
|
||||
'default': 'tool',
|
||||
'enum': list([
|
||||
'tool',
|
||||
]),
|
||||
'title': 'Type',
|
||||
'type': 'string',
|
||||
}),
|
||||
}),
|
||||
'required': list([
|
||||
'content',
|
||||
'tool_call_id',
|
||||
]),
|
||||
'title': 'ToolMessage',
|
||||
'type': 'object',
|
||||
}),
|
||||
'UsageMetadata': dict({
|
||||
'properties': dict({
|
||||
'input_tokens': dict({
|
||||
'title': 'Input Tokens',
|
||||
'type': 'integer',
|
||||
}),
|
||||
'output_tokens': dict({
|
||||
'title': 'Output Tokens',
|
||||
'type': 'integer',
|
||||
}),
|
||||
'total_tokens': dict({
|
||||
'title': 'Total Tokens',
|
||||
'type': 'integer',
|
||||
}),
|
||||
}),
|
||||
'required': list([
|
||||
'input_tokens',
|
||||
'output_tokens',
|
||||
'total_tokens',
|
||||
]),
|
||||
'title': 'UsageMetadata',
|
||||
'type': 'object',
|
||||
}),
|
||||
}),
|
||||
'title': 'RunnableParallel<as_list,as_str>Input',
|
||||
}),
|
||||
'id': 3,
|
||||
'type': 'schema',
|
||||
}),
|
||||
dict({
|
||||
'data': dict({
|
||||
'properties': dict({
|
||||
'as_list': dict({
|
||||
'items': dict({
|
||||
'type': 'string',
|
||||
}),
|
||||
'title': 'As List',
|
||||
'type': 'array',
|
||||
}),
|
||||
'as_str': dict({
|
||||
'title': 'As Str',
|
||||
}),
|
||||
}),
|
||||
'title': 'RunnableParallel<as_list,as_str>Output',
|
||||
'type': 'object',
|
||||
}),
|
||||
'id': 4,
|
||||
'type': 'schema',
|
||||
}),
|
||||
dict({
|
||||
'data': dict({
|
||||
'id': list([
|
||||
'langchain',
|
||||
'output_parsers',
|
||||
'list',
|
||||
'CommaSeparatedListOutputParser',
|
||||
]),
|
||||
'name': 'CommaSeparatedListOutputParser',
|
||||
}),
|
||||
'id': 5,
|
||||
'type': 'runnable',
|
||||
}),
|
||||
dict({
|
||||
'data': dict({
|
||||
'title': 'conditional_str_parser_input',
|
||||
'type': 'string',
|
||||
}),
|
||||
'id': 6,
|
||||
'type': 'schema',
|
||||
}),
|
||||
dict({
|
||||
'data': dict({
|
||||
'title': 'conditional_str_parser_output',
|
||||
}),
|
||||
'id': 7,
|
||||
'type': 'schema',
|
||||
}),
|
||||
dict({
|
||||
'data': dict({
|
||||
'id': list([
|
||||
'langchain',
|
||||
'schema',
|
||||
'output_parser',
|
||||
'StrOutputParser',
|
||||
]),
|
||||
'name': 'StrOutputParser',
|
||||
}),
|
||||
'id': 8,
|
||||
'type': 'runnable',
|
||||
}),
|
||||
dict({
|
||||
'data': dict({
|
||||
'id': list([
|
||||
'langchain_core',
|
||||
'output_parsers',
|
||||
'xml',
|
||||
'XMLOutputParser',
|
||||
]),
|
||||
'name': 'XMLOutputParser',
|
||||
}),
|
||||
'id': 9,
|
||||
'type': 'runnable',
|
||||
}),
|
||||
]),
|
||||
})
|
||||
# ---
|
||||
# name: test_graph_sequence_map[mermaid-simple]
|
||||
'''
|
||||
graph TD;
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -9,7 +9,6 @@ from langchain_core.output_parsers.xml import XMLOutputParser
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.runnables.base import Runnable, RunnableConfig
from langchain_core.runnables.graph_mermaid import _escape_node_label
from tests.unit_tests.stubs import AnyStr


def test_graph_single_runnable(snapshot: SnapshotAssertion) -> None:
@@ -34,7 +33,7 @@ def test_graph_sequence(snapshot: SnapshotAssertion) -> None:
    prompt = PromptTemplate.from_template("Hello, {name}!")
    list_parser = CommaSeparatedListOutputParser()

    sequence = prompt | fake_llm | list_parser
    sequence = prompt | fake_llm.with_config(metadata={"key": 2}) | list_parser
    graph = sequence.get_graph()
    assert graph.to_json() == {
        "nodes": [
@@ -58,6 +57,7 @@ def test_graph_sequence(snapshot: SnapshotAssertion) -> None:
                    "id": ["langchain_core", "language_models", "fake", "FakeListLLM"],
                    "name": "FakeListLLM",
                },
                "metadata": {"key": 2},
            },
            {
                "id": 3,
@@ -112,6 +112,7 @@ def test_graph_sequence(snapshot: SnapshotAssertion) -> None:
                    "id": ["langchain_core", "language_models", "fake", "FakeListLLM"],
                    "name": "FakeListLLM",
                },
                "metadata": {"key": 2},
            },
            {
                "id": 3,
@@ -169,527 +170,8 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
        }
    )
    graph = sequence.get_graph()
    assert graph.to_json(with_schemas=True) == {
        "nodes": [
            {
                "id": 0,
                "type": "schema",
                "data": {
                    "title": "PromptInput",
                    "type": "object",
                    "properties": {"name": {"title": "Name", "type": "string"}},
                    "required": ["name"],
                },
            },
            {
                "id": 1,
                "type": "runnable",
                "data": {
                    "id": ["langchain", "prompts", "prompt", "PromptTemplate"],
                    "name": "PromptTemplate",
                },
            },
            {
                "id": 2,
                "type": "runnable",
                "data": {
                    "id": ["langchain_core", "language_models", "fake", "FakeListLLM"],
                    "name": "FakeListLLM",
                },
            },
            {
                "id": 3,
                "type": "schema",
                "data": {
                    "title": "RunnableParallel<as_list,as_str>Input",
                    "anyOf": [
                        {"type": "string"},
                        {"$ref": "#/definitions/AIMessage"},
                        {"$ref": "#/definitions/HumanMessage"},
                        {"$ref": "#/definitions/ChatMessage"},
                        {"$ref": "#/definitions/SystemMessage"},
                        {"$ref": "#/definitions/FunctionMessage"},
                        {"$ref": "#/definitions/ToolMessage"},
                    ],
                    "definitions": {
                        "ToolCall": {
                            "title": "ToolCall",
                            "type": "object",
                            "properties": {
                                "name": {"title": "Name", "type": "string"},
                                "args": {"title": "Args", "type": "object"},
                                "id": {"title": "Id", "type": "string"},
                            },
                            "required": ["name", "args", "id"],
                        },
                        "InvalidToolCall": {
                            "title": "InvalidToolCall",
                            "type": "object",
                            "properties": {
                                "name": {"title": "Name", "type": "string"},
                                "args": {"title": "Args", "type": "string"},
                                "id": {"title": "Id", "type": "string"},
                                "error": {"title": "Error", "type": "string"},
                            },
                            "required": ["name", "args", "id", "error"],
                        },
                        "UsageMetadata": {
                            "title": "UsageMetadata",
                            "type": "object",
                            "properties": {
                                "input_tokens": {
                                    "title": "Input Tokens",
                                    "type": "integer",
                                },
                                "output_tokens": {
                                    "title": "Output Tokens",
                                    "type": "integer",
                                },
                                "total_tokens": {
                                    "title": "Total Tokens",
                                    "type": "integer",
                                },
                            },
                            "required": [
                                "input_tokens",
                                "output_tokens",
                                "total_tokens",
                            ],
                        },
                        "AIMessage": {
                            "title": "AIMessage",
                            "description": AnyStr(),
                            "type": "object",
                            "properties": {
                                "content": {
                                    "title": "Content",
                                    "anyOf": [
                                        {"type": "string"},
                                        {
                                            "type": "array",
                                            "items": {
                                                "anyOf": [
                                                    {"type": "string"},
                                                    {"type": "object"},
                                                ]
                                            },
                                        },
                                    ],
                                },
                                "additional_kwargs": {
                                    "title": "Additional Kwargs",
                                    "type": "object",
                                },
                                "response_metadata": {
                                    "title": "Response Metadata",
                                    "type": "object",
                                },
                                "type": {
                                    "title": "Type",
                                    "default": "ai",
                                    "enum": ["ai"],
                                    "type": "string",
                                },
                                "name": {"title": "Name", "type": "string"},
                                "id": {"title": "Id", "type": "string"},
                                "example": {
                                    "title": "Example",
                                    "default": False,
                                    "type": "boolean",
                                },
                                "tool_calls": {
                                    "title": "Tool Calls",
                                    "default": [],
                                    "type": "array",
                                    "items": {"$ref": "#/definitions/ToolCall"},
                                },
                                "invalid_tool_calls": {
                                    "title": "Invalid Tool Calls",
                                    "default": [],
                                    "type": "array",
                                    "items": {"$ref": "#/definitions/InvalidToolCall"},
                                },
                                "usage_metadata": {
                                    "$ref": "#/definitions/UsageMetadata"
                                },
                            },
                            "required": ["content"],
                        },
                        "HumanMessage": {
                            "title": "HumanMessage",
                            "description": AnyStr(),
                            "type": "object",
                            "properties": {
                                "content": {
                                    "title": "Content",
                                    "anyOf": [
                                        {"type": "string"},
                                        {
                                            "type": "array",
                                            "items": {
                                                "anyOf": [
                                                    {"type": "string"},
                                                    {"type": "object"},
                                                ]
                                            },
                                        },
                                    ],
                                },
                                "additional_kwargs": {
                                    "title": "Additional Kwargs",
                                    "type": "object",
                                },
                                "response_metadata": {
                                    "title": "Response Metadata",
                                    "type": "object",
                                },
                                "type": {
                                    "title": "Type",
                                    "default": "human",
                                    "enum": ["human"],
                                    "type": "string",
                                },
                                "name": {"title": "Name", "type": "string"},
                                "id": {"title": "Id", "type": "string"},
                                "example": {
                                    "title": "Example",
                                    "default": False,
                                    "type": "boolean",
                                },
                            },
                            "required": ["content"],
                        },
                        "ChatMessage": {
                            "title": "ChatMessage",
                            "description": AnyStr(),
                            "type": "object",
                            "properties": {
                                "content": {
                                    "title": "Content",
                                    "anyOf": [
                                        {"type": "string"},
                                        {
                                            "type": "array",
                                            "items": {
                                                "anyOf": [
                                                    {"type": "string"},
                                                    {"type": "object"},
                                                ]
                                            },
                                        },
                                    ],
                                },
                                "additional_kwargs": {
                                    "title": "Additional Kwargs",
                                    "type": "object",
                                },
                                "response_metadata": {
                                    "title": "Response Metadata",
                                    "type": "object",
                                },
                                "type": {
                                    "title": "Type",
                                    "default": "chat",
                                    "enum": ["chat"],
                                    "type": "string",
                                },
                                "name": {"title": "Name", "type": "string"},
                                "id": {"title": "Id", "type": "string"},
                                "role": {"title": "Role", "type": "string"},
                            },
                            "required": ["content", "role"],
                        },
                        "SystemMessage": {
                            "title": "SystemMessage",
                            "description": AnyStr(),
                            "type": "object",
                            "properties": {
                                "content": {
                                    "title": "Content",
                                    "anyOf": [
                                        {"type": "string"},
                                        {
                                            "type": "array",
                                            "items": {
                                                "anyOf": [
                                                    {"type": "string"},
                                                    {"type": "object"},
                                                ]
                                            },
                                        },
                                    ],
                                },
                                "additional_kwargs": {
                                    "title": "Additional Kwargs",
                                    "type": "object",
                                },
                                "response_metadata": {
                                    "title": "Response Metadata",
                                    "type": "object",
                                },
                                "type": {
                                    "title": "Type",
                                    "default": "system",
                                    "enum": ["system"],
                                    "type": "string",
                                },
                                "name": {"title": "Name", "type": "string"},
                                "id": {"title": "Id", "type": "string"},
                            },
                            "required": ["content"],
                        },
                        "FunctionMessage": {
                            "title": "FunctionMessage",
                            "description": AnyStr(),
                            "type": "object",
                            "properties": {
                                "content": {
                                    "title": "Content",
                                    "anyOf": [
                                        {"type": "string"},
                                        {
                                            "type": "array",
                                            "items": {
                                                "anyOf": [
                                                    {"type": "string"},
                                                    {"type": "object"},
                                                ]
                                            },
                                        },
                                    ],
                                },
                                "additional_kwargs": {
                                    "title": "Additional Kwargs",
                                    "type": "object",
                                },
                                "response_metadata": {
                                    "title": "Response Metadata",
                                    "type": "object",
                                },
                                "type": {
                                    "title": "Type",
                                    "default": "function",
                                    "enum": ["function"],
                                    "type": "string",
                                },
                                "name": {"title": "Name", "type": "string"},
                                "id": {"title": "Id", "type": "string"},
                            },
                            "required": ["content", "name"],
                        },
                        "ToolMessage": {
                            "title": "ToolMessage",
                            "description": AnyStr(),
                            "type": "object",
                            "properties": {
                                "content": {
                                    "title": "Content",
                                    "anyOf": [
                                        {"type": "string"},
                                        {
                                            "type": "array",
                                            "items": {
                                                "anyOf": [
                                                    {"type": "string"},
                                                    {"type": "object"},
                                                ]
                                            },
                                        },
                                    ],
                                },
                                "additional_kwargs": {
                                    "title": "Additional Kwargs",
                                    "type": "object",
                                },
                                "response_metadata": {
                                    "title": "Response Metadata",
                                    "type": "object",
                                },
                                "type": {
                                    "title": "Type",
                                    "default": "tool",
                                    "enum": ["tool"],
                                    "type": "string",
                                },
                                "name": {"title": "Name", "type": "string"},
                                "id": {"title": "Id", "type": "string"},
                                "tool_call_id": {
                                    "title": "Tool Call Id",
                                    "type": "string",
                                },
                            },
                            "required": ["content", "tool_call_id"],
                        },
                    },
                },
            },
            {
                "id": 4,
                "type": "schema",
                "data": {
                    "title": "RunnableParallel<as_list,as_str>Output",
                    "type": "object",
                    "properties": {
                        "as_list": {
                            "title": "As List",
                            "type": "array",
                            "items": {"type": "string"},
                        },
                        "as_str": {"title": "As Str"},
                    },
                },
            },
            {
                "id": 5,
                "type": "runnable",
                "data": {
                    "id": [
                        "langchain",
                        "output_parsers",
                        "list",
                        "CommaSeparatedListOutputParser",
                    ],
                    "name": "CommaSeparatedListOutputParser",
                },
            },
            {
                "id": 6,
                "type": "schema",
                "data": {"title": "conditional_str_parser_input", "type": "string"},
            },
            {
                "id": 7,
                "type": "schema",
                "data": {"title": "conditional_str_parser_output"},
            },
            {
                "id": 8,
                "type": "runnable",
                "data": {
                    "id": ["langchain", "schema", "output_parser", "StrOutputParser"],
                    "name": "StrOutputParser",
                },
            },
            {
                "id": 9,
                "type": "runnable",
                "data": {
                    "id": [
                        "langchain_core",
                        "output_parsers",
                        "xml",
                        "XMLOutputParser",
                    ],
                    "name": "XMLOutputParser",
                },
            },
        ],
        "edges": [
            {"source": 0, "target": 1},
            {"source": 1, "target": 2},
            {"source": 3, "target": 5},
            {"source": 5, "target": 4},
            {"source": 6, "target": 8},
            {"source": 8, "target": 7},
            {"source": 6, "target": 9},
            {"source": 9, "target": 7},
            {"source": 3, "target": 6},
            {"source": 7, "target": 4},
            {"source": 2, "target": 3},
        ],
    }
    assert graph.to_json() == {
        "nodes": [
            {
                "id": 0,
                "type": "schema",
                "data": "PromptInput",
            },
            {
                "id": 1,
                "type": "runnable",
                "data": {
                    "id": ["langchain", "prompts", "prompt", "PromptTemplate"],
                    "name": "PromptTemplate",
                },
            },
            {
                "id": 2,
                "type": "runnable",
                "data": {
                    "id": ["langchain_core", "language_models", "fake", "FakeListLLM"],
                    "name": "FakeListLLM",
                },
            },
            {
                "id": 3,
                "type": "schema",
                "data": "Parallel<as_list,as_str>Input",
            },
            {
                "id": 4,
                "type": "schema",
                "data": "Parallel<as_list,as_str>Output",
            },
            {
                "id": 5,
                "type": "runnable",
                "data": {
                    "id": [
                        "langchain",
                        "output_parsers",
                        "list",
                        "CommaSeparatedListOutputParser",
                    ],
                    "name": "CommaSeparatedListOutputParser",
                },
            },
            {
                "id": 6,
                "type": "schema",
                "data": "conditional_str_parser_input",
            },
            {
                "id": 7,
                "type": "schema",
                "data": "conditional_str_parser_output",
            },
            {
                "id": 8,
                "type": "runnable",
                "data": {
                    "id": ["langchain", "schema", "output_parser", "StrOutputParser"],
                    "name": "StrOutputParser",
                },
            },
            {
                "id": 9,
                "type": "runnable",
                "data": {
                    "id": [
                        "langchain_core",
                        "output_parsers",
                        "xml",
                        "XMLOutputParser",
                    ],
                    "name": "XMLOutputParser",
                },
            },
        ],
        "edges": [
            {"source": 0, "target": 1},
            {"source": 1, "target": 2},
            {"source": 3, "target": 5},
            {"source": 5, "target": 4},
            {"source": 6, "target": 8},
            {"source": 8, "target": 7},
            {"source": 6, "target": 9},
            {"source": 9, "target": 7},
            {"source": 3, "target": 6},
            {"source": 7, "target": 4},
            {"source": 2, "target": 3},
        ],
    }
    assert graph.to_json(with_schemas=True) == snapshot(name="graph_with_schema")
    assert graph.to_json() == snapshot(name="graph_no_schemas")
    assert graph.draw_ascii() == snapshot(name="ascii")
    assert graph.draw_mermaid() == snapshot(name="mermaid")
    assert graph.draw_mermaid(with_styles=False) == snapshot(name="mermaid-simple")

@@ -347,302 +347,12 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
        ]
    )

    assert chat_prompt.input_schema.schema() == {
        "title": "PromptInput",
        "type": "object",
        "properties": {
            "history": {
                "title": "History",
                "type": "array",
                "items": {
                    "anyOf": [
                        {"$ref": "#/definitions/AIMessage"},
                        {"$ref": "#/definitions/HumanMessage"},
                        {"$ref": "#/definitions/ChatMessage"},
                        {"$ref": "#/definitions/SystemMessage"},
                        {"$ref": "#/definitions/FunctionMessage"},
                        {"$ref": "#/definitions/ToolMessage"},
                    ]
                },
            }
        },
        "required": ["history"],
        "definitions": {
            "ToolCall": {
                "title": "ToolCall",
                "type": "object",
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "args": {"title": "Args", "type": "object"},
                    "id": {"title": "Id", "type": "string"},
                },
                "required": ["name", "args", "id"],
            },
            "InvalidToolCall": {
                "title": "InvalidToolCall",
                "type": "object",
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "args": {"title": "Args", "type": "string"},
                    "id": {"title": "Id", "type": "string"},
                    "error": {"title": "Error", "type": "string"},
                },
                "required": ["name", "args", "id", "error"],
            },
            "UsageMetadata": {
                "title": "UsageMetadata",
                "type": "object",
                "properties": {
                    "input_tokens": {"title": "Input Tokens", "type": "integer"},
                    "output_tokens": {"title": "Output Tokens", "type": "integer"},
                    "total_tokens": {"title": "Total Tokens", "type": "integer"},
                },
                "required": ["input_tokens", "output_tokens", "total_tokens"],
            },
            "AIMessage": {
                "title": "AIMessage",
                "description": "Message from an AI.\n\nAIMessage is returned from a chat model as a response to a prompt.\n\nThis message represents the output of the model and consists of both\nthe raw output as returned by the model together standardized fields\n(e.g., tool calls, usage metadata) added by the LangChain framework.",  # noqa: E501
                "type": "object",
                "properties": {
                    "content": {
                        "title": "Content",
                        "anyOf": [
                            {"type": "string"},
                            {
                                "type": "array",
                                "items": {
                                    "anyOf": [{"type": "string"}, {"type": "object"}]
                                },
                            },
                        ],
                    },
                    "additional_kwargs": {
                        "title": "Additional Kwargs",
                        "type": "object",
                    },
                    "response_metadata": {
                        "title": "Response Metadata",
                        "type": "object",
                    },
                    "type": {
                        "title": "Type",
                        "default": "ai",
                        "enum": ["ai"],
                        "type": "string",
                    },
                    "name": {"title": "Name", "type": "string"},
                    "id": {"title": "Id", "type": "string"},
                    "example": {
                        "title": "Example",
                        "default": False,
                        "type": "boolean",
                    },
                    "tool_calls": {
                        "title": "Tool Calls",
                        "default": [],
                        "type": "array",
                        "items": {"$ref": "#/definitions/ToolCall"},
                    },
                    "invalid_tool_calls": {
                        "title": "Invalid Tool Calls",
                        "default": [],
                        "type": "array",
                        "items": {"$ref": "#/definitions/InvalidToolCall"},
                    },
                    "usage_metadata": {"$ref": "#/definitions/UsageMetadata"},
                },
                "required": ["content"],
            },
            "HumanMessage": {
                "title": "HumanMessage",
                "description": 'Message from a human.\n\nHumanMessages are messages that are passed in from a human to the model.\n\nExample:\n\n    .. code-block:: python\n\n        from langchain_core.messages import HumanMessage, SystemMessage\n\n        messages = [\n            SystemMessage(\n                content="You are a helpful assistant! Your name is Bob."\n            ),\n            HumanMessage(\n                content="What is your name?"\n            )\n        ]\n\n        # Instantiate a chat model and invoke it with the messages\n        model = ...\n        print(model.invoke(messages))',  # noqa: E501
                "type": "object",
                "properties": {
                    "content": {
                        "title": "Content",
                        "anyOf": [
                            {"type": "string"},
                            {
                                "type": "array",
                                "items": {
                                    "anyOf": [{"type": "string"}, {"type": "object"}]
                                },
                            },
                        ],
                    },
                    "additional_kwargs": {
                        "title": "Additional Kwargs",
                        "type": "object",
                    },
                    "response_metadata": {
                        "title": "Response Metadata",
                        "type": "object",
                    },
                    "type": {
                        "title": "Type",
                        "default": "human",
                        "enum": ["human"],
                        "type": "string",
                    },
                    "name": {"title": "Name", "type": "string"},
                    "id": {"title": "Id", "type": "string"},
                    "example": {
                        "title": "Example",
                        "default": False,
                        "type": "boolean",
                    },
                },
                "required": ["content"],
            },
            "ChatMessage": {
                "title": "ChatMessage",
                "description": "Message that can be assigned an arbitrary speaker (i.e. role).",  # noqa: E501
                "type": "object",
                "properties": {
                    "content": {
                        "title": "Content",
                        "anyOf": [
                            {"type": "string"},
                            {
                                "type": "array",
                                "items": {
                                    "anyOf": [{"type": "string"}, {"type": "object"}]
                                },
                            },
                        ],
                    },
                    "additional_kwargs": {
                        "title": "Additional Kwargs",
                        "type": "object",
                    },
                    "response_metadata": {
                        "title": "Response Metadata",
                        "type": "object",
                    },
                    "type": {
                        "title": "Type",
                        "default": "chat",
                        "enum": ["chat"],
                        "type": "string",
                    },
                    "name": {"title": "Name", "type": "string"},
                    "id": {"title": "Id", "type": "string"},
                    "role": {"title": "Role", "type": "string"},
                },
                "required": ["content", "role"],
            },
            "SystemMessage": {
                "title": "SystemMessage",
                "description": 'Message for priming AI behavior.\n\nThe system message is usually passed in as the first of a sequence\nof input messages.\n\nExample:\n\n    .. code-block:: python\n\n        from langchain_core.messages import HumanMessage, SystemMessage\n\n        messages = [\n            SystemMessage(\n                content="You are a helpful assistant! Your name is Bob."\n            ),\n            HumanMessage(\n                content="What is your name?"\n            )\n        ]\n\n        # Define a chat model and invoke it with the messages\n        print(model.invoke(messages))',  # noqa: E501
                "type": "object",
                "properties": {
                    "content": {
                        "title": "Content",
                        "anyOf": [
                            {"type": "string"},
                            {
                                "type": "array",
                                "items": {
                                    "anyOf": [{"type": "string"}, {"type": "object"}]
                                },
                            },
                        ],
                    },
                    "additional_kwargs": {
                        "title": "Additional Kwargs",
                        "type": "object",
                    },
                    "response_metadata": {
                        "title": "Response Metadata",
                        "type": "object",
                    },
                    "type": {
                        "title": "Type",
                        "default": "system",
                        "enum": ["system"],
                        "type": "string",
                    },
                    "name": {"title": "Name", "type": "string"},
                    "id": {"title": "Id", "type": "string"},
                },
                "required": ["content"],
            },
            "FunctionMessage": {
                "title": "FunctionMessage",
                "description": "Message for passing the result of executing a tool back to a model.\n\nFunctionMessage are an older version of the ToolMessage schema, and\ndo not contain the tool_call_id field.\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.",  # noqa: E501
                "type": "object",
                "properties": {
                    "content": {
                        "title": "Content",
                        "anyOf": [
                            {"type": "string"},
                            {
                                "type": "array",
                                "items": {
                                    "anyOf": [{"type": "string"}, {"type": "object"}]
                                },
                            },
                        ],
                    },
                    "additional_kwargs": {
                        "title": "Additional Kwargs",
                        "type": "object",
                    },
                    "response_metadata": {
                        "title": "Response Metadata",
                        "type": "object",
                    },
                    "type": {
                        "title": "Type",
                        "default": "function",
                        "enum": ["function"],
                        "type": "string",
                    },
                    "name": {"title": "Name", "type": "string"},
                    "id": {"title": "Id", "type": "string"},
                },
                "required": ["content", "name"],
            },
            "ToolMessage": {
                "title": "ToolMessage",
                "description": "Message for passing the result of executing a tool back to a model.\n\nToolMessages contain the result of a tool invocation. Typically, the result\nis encoded inside the `content` field.\n\nExample: A TooMessage representing a result of 42 from a tool call with id\n\n    .. code-block:: python\n\n        from langchain_core.messages import ToolMessage\n\n        ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.",  # noqa: E501
                "type": "object",
                "properties": {
                    "content": {
                        "title": "Content",
                        "anyOf": [
                            {"type": "string"},
                            {
                                "type": "array",
                                "items": {
                                    "anyOf": [{"type": "string"}, {"type": "object"}]
                                },
                            },
                        ],
                    },
                    "additional_kwargs": {
                        "title": "Additional Kwargs",
                        "type": "object",
                    },
                    "response_metadata": {
                        "title": "Response Metadata",
                        "type": "object",
                    },
                    "type": {
                        "title": "Type",
                        "default": "tool",
                        "enum": ["tool"],
                        "type": "string",
                    },
                    "name": {"title": "Name", "type": "string"},
                    "id": {"title": "Id", "type": "string"},
                    "tool_call_id": {"title": "Tool Call Id", "type": "string"},
                },
                "required": ["content", "tool_call_id"],
            },
        },
    }
    assert chat_prompt.output_schema.schema() == snapshot
    assert chat_prompt.input_schema.schema() == snapshot(
        name="chat_prompt_input_schema"
    )
    assert chat_prompt.output_schema.schema() == snapshot(
        name="chat_prompt_output_schema"
    )

    prompt = PromptTemplate.from_template("Hello, {name}!")
Some files were not shown because too many files have changed in this diff