multiple: pydantic 2 compatibility, v0.3 (#26443)

Signed-off-by: ChengZi <chen.zhang@zilliz.com>
Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
Co-authored-by: Dan O'Donovan <dan.odonovan@gmail.com>
Co-authored-by: Tom Daniel Grande <tomdgrande@gmail.com>
Co-authored-by: Grande <Tom.Daniel.Grande@statsbygg.no>
Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: ccurme <chester.curme@gmail.com>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
Co-authored-by: Tomaz Bratanic <bratanic.tomaz@gmail.com>
Co-authored-by: ZhangShenao <15201440436@163.com>
Co-authored-by: Friso H. Kingma <fhkingma@gmail.com>
Co-authored-by: ChengZi <chen.zhang@zilliz.com>
Co-authored-by: Nuno Campos <nuno@langchain.dev>
Co-authored-by: Morgante Pell <morgantep@google.com>
Erick Friis 2024-09-13 14:38:45 -07:00 committed by GitHub
parent d9813bdbbc
commit c2a3021bb0
1402 changed files with 38318 additions and 30410 deletions


@@ -2,10 +2,12 @@ import glob
import json
import os
import sys
import tomllib
from collections import defaultdict
from typing import Dict, List, Set
from pathlib import Path
import tomllib
from get_min_versions import get_min_version_from_toml
LANGCHAIN_DIRS = [
@@ -16,6 +18,12 @@ LANGCHAIN_DIRS = [
"libs/experimental",
]
# when set to True, we are ignoring core dependents
# in order to be able to get CI to pass for each individual
# package that depends on core
# e.g. if you touch core, we don't then add textsplitters/etc to CI
IGNORE_CORE_DEPENDENTS = False
# ignored partners are removed from dependents
# but still run if directly edited
IGNORED_PARTNERS = [
@@ -99,44 +107,96 @@ def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
if dir_ == "libs/core":
return [
{"working-directory": dir_, "python-version": f"3.{v}"}
for v in range(8, 13)
]
min_python = "3.8"
max_python = "3.12"
if job == "test-pydantic":
return _get_pydantic_test_configs(dir_)
if dir_ == "libs/core":
py_versions = ["3.9", "3.10", "3.11", "3.12"]
# custom logic for specific directories
if dir_ == "libs/partners/milvus":
elif dir_ == "libs/partners/milvus":
# milvus poetry doesn't allow 3.12 because they
# declare deps in a funny way
max_python = "3.11"
py_versions = ["3.9", "3.11"]
if dir_ in ["libs/community", "libs/langchain"] and job == "extended-tests":
elif dir_ in ["libs/community", "libs/langchain"] and job == "extended-tests":
# community extended test resolution in 3.12 is slow
# even in uv
max_python = "3.11"
py_versions = ["3.9", "3.11"]
if dir_ == "libs/community" and job == "compile-integration-tests":
elif dir_ == "libs/community" and job == "compile-integration-tests":
# community integration deps are slow in 3.12
max_python = "3.11"
py_versions = ["3.9", "3.11"]
else:
py_versions = ["3.9", "3.12"]
return [
{"working-directory": dir_, "python-version": min_python},
{"working-directory": dir_, "python-version": max_python},
return [{"working-directory": dir_, "python-version": py_v} for py_v in py_versions]
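As a reviewer aid (not part of the diff): for one of the special-cased directories above, the rewritten return expands to entries like the following sketch.

# Illustrative only: dir_ = "libs/partners/milvus" is capped at 3.11 above, so
# _get_configs_for_single_dir("test", "libs/partners/milvus") yields:
configs = [
    {"working-directory": "libs/partners/milvus", "python-version": "3.9"},
    {"working-directory": "libs/partners/milvus", "python-version": "3.11"},
]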
def _get_pydantic_test_configs(
dir_: str, *, python_version: str = "3.11"
) -> List[Dict[str, str]]:
with open("./libs/core/poetry.lock", "rb") as f:
core_poetry_lock_data = tomllib.load(f)
for package in core_poetry_lock_data["package"]:
if package["name"] == "pydantic":
core_max_pydantic_minor = package["version"].split(".")[1]
break
with open(f"./{dir_}/poetry.lock", "rb") as f:
dir_poetry_lock_data = tomllib.load(f)
for package in dir_poetry_lock_data["package"]:
if package["name"] == "pydantic":
dir_max_pydantic_minor = package["version"].split(".")[1]
break
core_min_pydantic_version = get_min_version_from_toml(
"./libs/core/pyproject.toml", "release", python_version, include=["pydantic"]
)["pydantic"]
core_min_pydantic_minor = core_min_pydantic_version.split(".")[1] if "." in core_min_pydantic_version else "0"
dir_min_pydantic_version = (
get_min_version_from_toml(
f"./{dir_}/pyproject.toml", "release", python_version, include=["pydantic"]
)
.get("pydantic", "0.0.0")
)
dir_min_pydantic_minor = dir_min_pydantic_version.split(".")[1] if "." in dir_min_pydantic_version else "0"
custom_mins = {
# depends on pydantic-settings 2.4 which requires pydantic 2.7
"libs/community": 7,
}
max_pydantic_minor = min(
int(dir_max_pydantic_minor),
int(core_max_pydantic_minor),
)
min_pydantic_minor = max(
int(dir_min_pydantic_minor),
int(core_min_pydantic_minor),
custom_mins.get(dir_, 0),
)
configs = [
{
"working-directory": dir_,
"pydantic-version": f"2.{v}.0",
"python-version": python_version,
}
for v in range(min_pydantic_minor, max_pydantic_minor + 1)
]
return configs
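For context, a minimal standalone sketch of the poetry.lock lookup performed inline above. The helper name is hypothetical; it assumes the standard poetry.lock layout (a top-level array of package tables), which is what tomllib.load returns here.

import tomllib  # stdlib on Python 3.11+; the tomli backport exposes the same API

def pydantic_minor_from_lock(lock_path: str) -> int:
    """Return the pinned pydantic minor version from a poetry.lock file."""
    with open(lock_path, "rb") as f:
        lock = tomllib.load(f)
    for package in lock["package"]:
        if package["name"] == "pydantic":
            return int(package["version"].split(".")[1])
    raise LookupError(f"pydantic not pinned in {lock_path}")

# e.g. if libs/core pins pydantic 2.9.1 and the partner package pins 2.7.4,
# the lookups yield 9 and 7, and the matrix above tests 2.<min_minor>.0..2.7.0.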
def _get_configs_for_multi_dirs(
job: str, dirs_to_run: List[str], dependents: dict
job: str, dirs_to_run: Dict[str, Set[str]], dependents: dict
) -> List[Dict[str, str]]:
if job == "lint":
dirs = add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
dependents,
)
elif job in ["test", "compile-integration-tests", "dependencies"]:
elif job in ["test", "compile-integration-tests", "dependencies", "test-pydantic"]:
dirs = add_dependents(
dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
)
@@ -165,6 +225,7 @@ if __name__ == "__main__":
dirs_to_run["lint"] = all_package_dirs()
dirs_to_run["test"] = all_package_dirs()
dirs_to_run["extended-test"] = set(LANGCHAIN_DIRS)
for file in files:
if any(
file.startswith(dir_)
@@ -182,8 +243,12 @@ if __name__ == "__main__":
if any(file.startswith(dir_) for dir_ in LANGCHAIN_DIRS):
# add that dir and all dirs after in LANGCHAIN_DIRS
# for extended testing
found = False
for dir_ in LANGCHAIN_DIRS:
if dir_ == "libs/core" and IGNORE_CORE_DEPENDENTS:
dirs_to_run["extended-test"].add(dir_)
continue
if file.startswith(dir_):
found = True
if found:
@@ -224,7 +289,6 @@ if __name__ == "__main__":
# we now have dirs_by_job
# todo: clean this up
map_job_to_configs = {
job: _get_configs_for_multi_dirs(job, dirs_to_run, dependents)
for job in [
@@ -233,6 +297,7 @@ if __name__ == "__main__":
"extended-tests",
"compile-integration-tests",
"dependencies",
"test-pydantic",
]
}
map_job_to_configs["test-doc-imports"] = (


@@ -11,7 +11,7 @@ if __name__ == "__main__":
# see if we're releasing an rc
version = toml_data["tool"]["poetry"]["version"]
releasing_rc = "rc" in version
releasing_rc = "rc" in version or "dev" in version
# if not, iterate through dependencies and make sure none allow prereleases
if not releasing_rc:


@@ -1,4 +1,5 @@
import sys
from typing import Optional
if sys.version_info >= (3, 11):
import tomllib
@@ -7,6 +8,9 @@ else:
import tomli as tomllib
from packaging.version import parse as parse_version
from packaging.specifiers import SpecifierSet
from packaging.version import Version
import re
MIN_VERSION_LIBS = [
@@ -15,6 +19,7 @@ MIN_VERSION_LIBS = [
"langchain",
"langchain-text-splitters",
"SQLAlchemy",
"pydantic",
]
SKIP_IF_PULL_REQUEST = ["langchain-core"]
@@ -45,7 +50,13 @@ def get_min_version(version: str) -> str:
raise ValueError(f"Unrecognized version format: {version}")
def get_min_version_from_toml(toml_path: str, versions_for: str):
def get_min_version_from_toml(
toml_path: str,
versions_for: str,
python_version: str,
*,
include: Optional[list] = None,
):
# Parse the TOML file
with open(toml_path, "rb") as file:
toml_data = tomllib.load(file)
@@ -64,11 +75,20 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
continue
# Check if the lib is present in the dependencies
if lib in dependencies:
if include and lib not in include:
continue
# Get the version string
version_string = dependencies[lib]
if isinstance(version_string, dict):
version_string = version_string["version"]
if isinstance(version_string, list):
version_string = [
vs
for vs in version_string
if check_python_version(python_version, vs["python"])
][0]["version"]
# Use parse_version to get the minimum supported version from version_string
min_version = get_min_version(version_string)
@@ -79,13 +99,31 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
return min_versions
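The list branch added above handles dependencies that declare different constraints per Python version. A sketch of the parsed shape it expects (package name and version numbers are illustrative):

# tomllib parses a pyproject.toml entry such as
#   pydantic = [
#       {version = ">=2.7.4,<3", python = ">=3.12.4"},
#       {version = ">=2.5.2,<3", python = "<3.12.4"},
#   ]
# into:
dependencies = {
    "pydantic": [
        {"version": ">=2.7.4,<3", "python": ">=3.12.4"},
        {"version": ">=2.5.2,<3", "python": "<3.12.4"},
    ]
}
# The code selects the first entry whose "python" constraint matches the
# interpreter version passed in, then reads its "version" string.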
def check_python_version(version_string, constraint_string):
"""
Check if the given Python version matches the given constraints.
:param version_string: A string representing the Python version (e.g. "3.8.5").
:param constraint_string: A string representing the package's Python version constraints (e.g. ">=3.6, <4.0").
:return: True if the version matches the constraints, False otherwise.
"""
try:
version = Version(version_string)
constraints = SpecifierSet(constraint_string)
return version in constraints
except Exception as e:
print(f"Error: {e}")
return False
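Illustrative usage of the helper above (values are examples, not from the diff):

assert check_python_version("3.9.1", ">=3.8,<4.0") is True
assert check_python_version("3.7.0", ">=3.8,<4.0") is False
# A malformed version or constraint prints the parse error and returns False
# instead of raising, so callers can treat it as "does not match".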
if __name__ == "__main__":
# Get the TOML file path from the command line argument
toml_file = sys.argv[1]
versions_for = sys.argv[2]
python_version = sys.argv[3]
assert versions_for in ["release", "pull_request"]
# Call the function to get the minimum versions
min_versions = get_min_version_from_toml(toml_file, versions_for)
min_versions = get_min_version_from_toml(toml_file, versions_for, python_version)
print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))


@@ -1,114 +0,0 @@
name: dependencies
on:
workflow_call:
inputs:
working-directory:
required: true
type: string
description: "From which folder this pipeline executes"
langchain-location:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
jobs:
build:
defaults:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: dependency checks ${{ inputs.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: pydantic-cross-compat
- name: Install dependencies
shell: bash
run: poetry install
- name: Check imports with base dependencies
shell: bash
run: poetry run make check_imports
- name: Install test dependencies
shell: bash
run: poetry install --with test
- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}
if: ${{ inputs.langchain-location }}
env:
LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
run: |
poetry run pip install -e "$LANGCHAIN_LOCATION"
- name: Install the opposite major version of pydantic
# If normal tests use pydantic v1, here we'll use v2, and vice versa.
shell: bash
# airbyte currently doesn't support pydantic v2
if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
run: |
# Determine the major part of pydantic version
REGULAR_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
if [[ "$REGULAR_VERSION" == "1" ]]; then
PYDANTIC_DEP=">=2.1,<3"
TEST_WITH_VERSION="2"
elif [[ "$REGULAR_VERSION" == "2" ]]; then
PYDANTIC_DEP="<2"
TEST_WITH_VERSION="1"
else
echo "Unexpected pydantic major version '$REGULAR_VERSION', cannot determine which version to use for cross-compatibility test."
exit 1
fi
# Install via `pip` instead of `poetry add` to avoid changing lockfile,
# which would prevent caching from working: the cache would get saved
# to a different key than where it gets loaded from.
poetry run pip install "pydantic${PYDANTIC_DEP}"
# Ensure that the correct pydantic is installed now.
echo "Checking pydantic version... Expecting ${TEST_WITH_VERSION}"
# Determine the major part of pydantic version
CURRENT_VERSION=$(poetry run python -c "import pydantic; print(pydantic.__version__)" | cut -d. -f1)
# Check that the major part of pydantic version is as expected, if not
# raise an error
if [[ "$CURRENT_VERSION" != "$TEST_WITH_VERSION" ]]; then
echo "Error: expected pydantic version ${CURRENT_VERSION} to have been installed, but found: ${TEST_WITH_VERSION}"
exit 1
fi
echo "Found pydantic version ${CURRENT_VERSION}, as expected"
- name: Run pydantic compatibility tests
# airbyte currently doesn't support pydantic v2
if: ${{ !startsWith(inputs.working-directory, 'libs/partners/airbyte') }}
shell: bash
run: make test
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'


@@ -7,10 +7,6 @@ on:
required: true
type: string
description: "From which folder this pipeline executes"
langchain-location:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
@@ -63,14 +59,6 @@ jobs:
run: |
poetry install --with lint,typing
- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}
if: ${{ inputs.langchain-location }}
env:
LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
run: |
poetry run pip install -e "$LANGCHAIN_LOCATION"
- name: Get .mypy_cache to speed up mypy
uses: actions/cache@v4
env:


@@ -164,6 +164,7 @@ jobs:
- name: Set up Python + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
id: setup-python
with:
python-version: ${{ env.PYTHON_VERSION }}
poetry-version: ${{ env.POETRY_VERSION }}
@@ -231,7 +232,7 @@
id: min-version
run: |
poetry run pip install packaging
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release)"
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release ${{ steps.setup-python.outputs.installed-python-version }})"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"


@@ -7,10 +7,6 @@ on:
required: true
type: string
description: "From which folder this pipeline executes"
langchain-location:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
@@ -31,29 +27,42 @@ jobs:
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
id: setup-python
with:
python-version: ${{ inputs.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core
- name: Install dependencies
shell: bash
run: poetry install --with test
- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}
if: ${{ inputs.langchain-location }}
env:
LANGCHAIN_LOCATION: ${{ inputs.langchain-location }}
run: |
poetry run pip install -e "$LANGCHAIN_LOCATION"
- name: Run core tests
shell: bash
run: |
make test
- name: Get minimum versions
working-directory: ${{ inputs.working-directory }}
id: min-version
shell: bash
run: |
poetry run pip install packaging tomli
echo "Python version ${{ steps.setup-python.outputs.installed-python-version }}"
python_version="$(poetry run python --version | awk '{print $2}')"
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request $python_version)"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"
- name: Run unit tests with minimum dependency versions
if: ${{ steps.min-version.outputs.min-versions != '' }}
env:
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
run: |
poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
make tests
working-directory: ${{ inputs.working-directory }}
- name: Ensure the tests did not create any additional files
shell: bash
run: |
@@ -66,20 +75,3 @@ jobs:
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'
- name: Get minimum versions
working-directory: ${{ inputs.working-directory }}
id: min-version
run: |
poetry run pip install packaging tomli
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request)"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"
- name: Run unit tests with minimum dependency versions
if: ${{ steps.min-version.outputs.min-versions != '' }}
env:
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
run: |
poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
make tests
working-directory: ${{ inputs.working-directory }}

.github/workflows/_test_pydantic.yml (new file, 64 lines)

@@ -0,0 +1,64 @@
name: test pydantic intermediate versions
on:
workflow_call:
inputs:
working-directory:
required: true
type: string
description: "From which folder this pipeline executes"
python-version:
required: false
type: string
description: "Python version to use"
default: "3.11"
pydantic-version:
required: true
type: string
description: "Pydantic version to test."
env:
POETRY_VERSION: "1.7.1"
jobs:
build:
defaults:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: "make test # pydantic: ~=${{ inputs.pydantic-version }}, python: ${{ inputs.python-version }}, "
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core
- name: Install dependencies
shell: bash
run: poetry install --with test
- name: Overwrite pydantic version
shell: bash
run: poetry run pip install pydantic~=${{ inputs.pydantic-version }}
- name: Run core tests
shell: bash
run: |
make test
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'


@@ -31,6 +31,7 @@ jobs:
uses: Ana06/get-changed-files@v2.2.0
- id: set-matrix
run: |
python -m pip install packaging
python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
outputs:
lint: ${{ steps.set-matrix.outputs.lint }}
@@ -39,6 +40,7 @@ jobs:
compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
dependencies: ${{ steps.set-matrix.outputs.dependencies }}
test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
test-pydantic: ${{ steps.set-matrix.outputs.test-pydantic }}
lint:
name: cd ${{ matrix.job-configs.working-directory }}
needs: [ build ]
@@ -46,6 +48,7 @@ jobs:
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.lint) }}
fail-fast: false
uses: ./.github/workflows/_lint.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
@@ -59,18 +62,34 @@ jobs:
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test) }}
fail-fast: false
uses: ./.github/workflows/_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
secrets: inherit
test-pydantic:
name: cd ${{ matrix.job-configs.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.test-pydantic != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test-pydantic) }}
fail-fast: false
uses: ./.github/workflows/_test_pydantic.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
pydantic-version: ${{ matrix.job-configs.pydantic-version }}
secrets: inherit
test-doc-imports:
needs: [ build ]
if: ${{ needs.build.outputs.test-doc-imports != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
fail-fast: false
uses: ./.github/workflows/_test_doc_imports.yml
secrets: inherit
with:
@@ -83,25 +102,13 @@ jobs:
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
fail-fast: false
uses: ./.github/workflows/_compile_integration_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
secrets: inherit
dependencies:
name: cd ${{ matrix.job-configs.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.dependencies != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
uses: ./.github/workflows/_dependencies.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
secrets: inherit
extended-tests:
name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
needs: [ build ]
@@ -110,6 +117,7 @@ jobs:
matrix:
# note different variable for extended test dirs
job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
fail-fast: false
runs-on: ubuntu-latest
defaults:
run:
@@ -149,7 +157,7 @@
echo "$STATUS" | grep 'nothing to commit, working tree clean'
ci_success:
name: "CI Success"
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests, test-doc-imports]
needs: [build, lint, test, compile-integration-tests, extended-tests, test-doc-imports, test-pydantic]
if: |
always()
runs-on: ubuntu-latest


@@ -3,9 +3,8 @@ name: CI / cd . / make spell_check
on:
push:
branches: [master, v0.1]
branches: [master, v0.1, v0.2]
pull_request:
branches: [master, v0.1]
permissions:
contents: read


@@ -17,7 +17,7 @@ jobs:
fail-fast: false
matrix:
python-version:
- "3.8"
- "3.9"
- "3.11"
working-directory:
- "libs/partners/openai"


@@ -36,7 +36,6 @@ api_docs_build:
API_PKG ?= text-splitters
api_docs_quick_preview:
poetry run pip install "pydantic<2"
poetry run python docs/api_reference/create_api_rst.py $(API_PKG)
cd docs/api_reference && poetry run make html
poetry run python docs/api_reference/scripts/custom_formatter.py docs/api_reference/_build/html/


@@ -90,7 +90,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"# Please manually enter OpenAI Key"
]
},


@@ -33,8 +33,8 @@ install-py-deps:
python3 -m venv .venv
$(PYTHON) -m pip install --upgrade pip
$(PYTHON) -m pip install --upgrade uv
$(PYTHON) -m uv pip install -r vercel_requirements.txt
$(PYTHON) -m uv pip install --editable $(PARTNER_DEPS_LIST)
$(PYTHON) -m uv pip install --pre -r vercel_requirements.txt
$(PYTHON) -m uv pip install --pre --editable $(PARTNER_DEPS_LIST)
generate-files:
mkdir -p $(INTERMEDIATE_DIR)
@@ -86,10 +86,6 @@ vercel-build: install-vercel-deps build generate-references
mv langchain-api-docs-build/api_reference_build/html/* static/api_reference/
rm -rf langchain-api-docs-build
NODE_OPTIONS="--max-old-space-size=5000" yarn run docusaurus build
mv build v0.2
mkdir build
mv v0.2 build
mv build/v0.2/404.html build
start:
cd $(OUTPUT_NEW_DIR) && yarn && yarn start --port=$(PORT)

File diff suppressed because one or more lines are too long


@@ -1,5 +1,5 @@
autodoc_pydantic>=1,<2
sphinx<=7
autodoc_pydantic>=2,<3
sphinx>=8,<9
myst-parser>=3
sphinx-autobuild>=2024
pydata-sphinx-theme>=0.15
@@ -8,4 +8,4 @@ myst-nb>=1.1.1
pyyaml
sphinx-design
sphinx-copybutton
beautifulsoup4
beautifulsoup4


@@ -17,7 +17,10 @@ def process_toc_h3_elements(html_content: str) -> str:
# Process each element
for element in toc_h3_elements:
element = element.a.code.span
try:
element = element.a.code.span
except Exception:
continue
# Get the text content of the element
content = element.get_text()


@@ -15,7 +15,7 @@
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, model_extra, model_fields_set, model_json_schema
{% block attributes %}


@@ -15,7 +15,7 @@
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign, as_tool
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign, as_tool, get_config_jsonschema, get_input_jsonschema, get_output_jsonschema, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, to_json, model_extra, model_fields_set, model_json_schema
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃


@@ -13,45 +13,45 @@ From the opposite direction, scientists use `LangChain` in research and referenc
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation|
|------------------|---------|-------------------|------------------------|
| `2403.14403v2` [Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity](http://arxiv.org/abs/2403.14403v2) | Soyeong Jeong, Jinheon Baek, Sukmin Cho, et al. | 2024&#8209;03&#8209;21 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
| `2403.14403v2` [Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity](http://arxiv.org/abs/2403.14403v2) | Soyeong Jeong, Jinheon Baek, Sukmin Cho, et al. | 2024&#8209;03&#8209;21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024&#8209;02&#8209;06 | `Cookbook:` [Self-Discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb)
| `2402.03367v2` [RAG-Fusion: a New Take on Retrieval-Augmented Generation](http://arxiv.org/abs/2402.03367v2) | Zackary Rackauckas | 2024&#8209;01&#8209;31 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
| `2402.03367v2` [RAG-Fusion: a New Take on Retrieval-Augmented Generation](http://arxiv.org/abs/2402.03367v2) | Zackary Rackauckas | 2024&#8209;01&#8209;31 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. | 2024&#8209;01&#8209;31 | `Cookbook:` [Raptor](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb)
| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024&#8209;01&#8209;29 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts), `Cookbook:` [Langgraph Crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
| `2401.08500v1` [Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering](http://arxiv.org/abs/2401.08500v1) | Tal Ridnik, Dedy Kredo, Itamar Friedman | 2024&#8209;01&#8209;16 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024&#8209;01&#8209;29 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
| `2401.08500v1` [Code Generation with AlphaCodium: From Prompt Engineering to Flow Engineering](http://arxiv.org/abs/2401.08500v1) | Tal Ridnik, Dedy Kredo, Itamar Friedman | 2024&#8209;01&#8209;16 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024&#8209;01&#8209;08 | `Cookbook:` [Together Ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb)
| `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023&#8209;12&#8209;11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval)
| `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023&#8209;11&#8209;15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki)
| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023&#8209;10&#8209;17 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts), `Cookbook:` [Langgraph Self Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023&#8209;10&#8209;09 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts), `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [Stepback-Qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023&#8209;10&#8209;17 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Cookbook:` [Langgraph Self Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023&#8209;10&#8209;09 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [Stepback-Qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
| `2307.15337v3` [Skeleton-of-Thought: Prompting LLMs for Efficient Parallel Generation](http://arxiv.org/abs/2307.15337v3) | Xuefei Ning, Zinan Lin, Zixuan Zhou, et al. | 2023&#8209;07&#8209;28 | `Template:` [skeleton-of-thought](https://python.langchain.com/docs/templates/skeleton-of-thought)
| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023&#8209;07&#8209;18 | `Cookbook:` [Semi Structured Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb)
| `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt, et al. | 2023&#8209;07&#8209;06 | `Docs:` [docs/how_to/long_context_reorder](https://python.langchain.com/v0.2/docs/how_to/long_context_reorder)
| `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt, et al. | 2023&#8209;07&#8209;06 | `Docs:` [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)
| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. | 2023&#8209;05&#8209;23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [Rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb)
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023&#8209;05&#8209;15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [Tree Of Thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb)
| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023&#8209;05&#8209;06 | `Cookbook:` [Plan And Execute Agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb)
| `2305.02156v1` [Zero-Shot Listwise Document Reranking with a Large Language Model](http://arxiv.org/abs/2305.02156v1) | Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al. | 2023&#8209;05&#8209;03 | `Docs:` [docs/how_to/contextual_compression](https://python.langchain.com/v0.2/docs/how_to/contextual_compression), `API:` [langchain...LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank)
| `2305.02156v1` [Zero-Shot Listwise Document Reranking with a Large Language Model](http://arxiv.org/abs/2305.02156v1) | Xueguang Ma, Xinyu Zhang, Ronak Pradeep, et al. | 2023&#8209;05&#8209;03 | `Docs:` [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression), `API:` [langchain...LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank)
| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023&#8209;04&#8209;17 | `Cookbook:` [Semi Structured Multi Modal Rag Llama2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb), [Semi Structured And Multi Modal Rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb)
| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. | 2023&#8209;04&#8209;07 | `Cookbook:` [Generative Agents Interactive Simulacra Of Human Behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb), [Multiagent Bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb)
| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023&#8209;03&#8209;31 | `Cookbook:` [Camel Role Playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb)
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023&#8209;03&#8209;30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [Hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb)
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023&#8209;01&#8209;24 | `API:` [langchain_community...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022&#8209;12&#8209;20 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts), `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [Hypothetical Document Embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
| `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al. | 2022&#8209;12&#8209;15 | `Docs:` [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/v0.2/docs/versions/migrating_chains/constitutional_chain)
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022&#8209;12&#8209;20 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [Hypothetical Document Embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
| `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al. | 2022&#8209;12&#8209;15 | `Docs:` [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022&#8209;12&#8209;12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022&#8209;11&#8209;25 | `API:` [langchain_core...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022&#8209;11&#8209;18 | `API:` [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [Program Aided Language Model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb)
| `2210.11934v2` [An Analysis of Fusion Functions for Hybrid Retrieval](http://arxiv.org/abs/2210.11934v2) | Sebastian Bruch, Siyu Gai, Amir Ingber | 2022&#8209;10&#8209;21 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022&#8209;10&#8209;06 | `Docs:` [docs/integrations/tools/ionic_shopping](https://python.langchain.com/v0.2/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/v0.2/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/v0.2/docs/concepts), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022&#8209;09&#8209;22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/v0.2/docs/integrations/providers/activeloop_deeplake)
| `2205.13147v4` [Matryoshka Representation Learning](http://arxiv.org/abs/2205.13147v4) | Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al. | 2022&#8209;05&#8209;26 | `Docs:` [docs/integrations/providers/snowflake](https://python.langchain.com/v0.2/docs/integrations/providers/snowflake)
| `2210.11934v2` [An Analysis of Fusion Functions for Hybrid Retrieval](http://arxiv.org/abs/2210.11934v2) | Sebastian Bruch, Siyu Gai, Amir Ingber | 2022&#8209;10&#8209;21 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
| `2210.03629v3` [ReAct: Synergizing Reasoning and Acting in Language Models](http://arxiv.org/abs/2210.03629v3) | Shunyu Yao, Jeffrey Zhao, Dian Yu, et al. | 2022&#8209;10&#8209;06 | `Docs:` [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts), `API:` [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022&#8209;09&#8209;22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
| `2205.13147v4` [Matryoshka Representation Learning](http://arxiv.org/abs/2205.13147v4) | Aditya Kusupati, Gantavya Bhatt, Aniket Rege, et al. | 2022&#8209;05&#8209;26 | `Docs:` [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022&#8209;05&#8209;25 | `API:` [langchain_community...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022&#8209;03&#8209;15 | `Docs:` [docs/tutorials/sql_qa](https://python.langchain.com/v0.2/docs/tutorials/sql_qa), `API:` [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022&#8209;03&#8209;15 | `Docs:` [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa), `API:` [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022&#8209;02&#8209;01 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `2112.01488v3` [ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction](http://arxiv.org/abs/2112.01488v3) | Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al. | 2021&#8209;12&#8209;02 | `Docs:` [docs/integrations/retrievers/ragatouille](https://python.langchain.com/v0.2/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/v0.2/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/v0.2/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/v0.2/docs/integrations/providers/dspy)
| `2112.01488v3` [ColBERTv2: Effective and Efficient Retrieval via Lightweight Late Interaction](http://arxiv.org/abs/2112.01488v3) | Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, et al. | 2021&#8209;12&#8209;02 | `Docs:` [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021&#8209;02&#8209;26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
| `2005.14165v4` [Language Models are Few-Shot Learners](http://arxiv.org/abs/2005.14165v4) | Tom B. Brown, Benjamin Mann, Nick Ryder, et al. | 2020&#8209;05&#8209;28 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
| `2005.11401v4` [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](http://arxiv.org/abs/2005.11401v4) | Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. | 2020&#8209;05&#8209;22 | `Docs:` [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
| `2005.14165v4` [Language Models are Few-Shot Learners](http://arxiv.org/abs/2005.14165v4) | Tom B. Brown, Benjamin Mann, Nick Ryder, et al. | 2020&#8209;05&#8209;28 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
| `2005.11401v4` [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](http://arxiv.org/abs/2005.11401v4) | Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. | 2020&#8209;05&#8209;22 | `Docs:` [docs/concepts](https://python.langchain.com/docs/concepts)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019&#8209;09&#8209;11 | `API:` [langchain_huggingface...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
## Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity
@@ -60,7 +60,7 @@ From the opposite direction, scientists use `LangChain` in research and referenc
- **arXiv id:** [2403.14403v2](http://arxiv.org/abs/2403.14403v2) **Published Date:** 2024-03-21
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
**Abstract:** Retrieval-Augmented Large Language Models (LLMs), which incorporate the
non-parametric knowledge from external knowledge bases into LLMs, have emerged
@@ -113,7 +113,7 @@ commonalities with human reasoning patterns.
- **arXiv id:** [2402.03367v2](http://arxiv.org/abs/2402.03367v2) **Published Date:** 2024-01-31
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
**Abstract:** Infineon has identified a need for engineers, account managers, and customers
to rapidly obtain product information. This problem is traditionally addressed
@@ -159,7 +159,7 @@ benchmark by 20% in absolute accuracy.
- **arXiv id:** [2401.15884v2](http://arxiv.org/abs/2401.15884v2) **Published Date:** 2024-01-29
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
- **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb)
**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the
@@ -187,7 +187,7 @@ performance of RAG-based approaches.
- **arXiv id:** [2401.08500v1](http://arxiv.org/abs/2401.08500v1) **Published Date:** 2024-01-16
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
**Abstract:** Code generation problems differ from common natural language problems - they
require matching the exact syntax of the target language, identifying happy
@@ -293,7 +293,7 @@ outside the pre-training knowledge scope.
- **arXiv id:** [2310.11511v1](http://arxiv.org/abs/2310.11511v1) **Published Date:** 2023-10-17
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
- **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb)
**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often
@@ -324,7 +324,7 @@ to these models.
- **arXiv id:** [2310.06117v2](http://arxiv.org/abs/2310.06117v2) **Published Date:** 2023-10-09
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
- **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting)
- **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb)
@@ -384,7 +384,7 @@ contribute to the responsible development of LLMs.
- **arXiv id:** [2307.03172v3](http://arxiv.org/abs/2307.03172v3) **Published Date:** 2023-07-06
- **LangChain:**
- **Documentation:** [docs/how_to/long_context_reorder](https://python.langchain.com/v0.2/docs/how_to/long_context_reorder)
- **Documentation:** [docs/how_to/long_context_reorder](https://python.langchain.com/docs/how_to/long_context_reorder)
**Abstract:** While recent language models have the ability to take long contexts as input,
relatively little is known about how well they use longer context. We analyze
@@ -490,7 +490,7 @@ https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting.
- **arXiv id:** [2305.02156v1](http://arxiv.org/abs/2305.02156v1) **Published Date:** 2023-05-03
- **LangChain:**
- **Documentation:** [docs/how_to/contextual_compression](https://python.langchain.com/v0.2/docs/how_to/contextual_compression)
- **Documentation:** [docs/how_to/contextual_compression](https://python.langchain.com/docs/how_to/contextual_compression)
- **API Reference:** [langchain...LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html#langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank)
**Abstract:** Supervised ranking methods based on bi-encoder or cross-encoder architectures
@ -649,7 +649,7 @@ family, and discuss robustness and security.
- **arXiv id:** [2212.10496v1](http://arxiv.org/abs/2212.10496v1) **Published Date:** 2022-12-20
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
- **API Reference:** [langchain...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
- **Template:** [hyde](https://python.langchain.com/docs/templates/hyde)
- **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb)
@ -678,7 +678,7 @@ search, QA, fact verification) and languages~(e.g. sw, ko, ja).
- **arXiv id:** [2212.08073v1](http://arxiv.org/abs/2212.08073v1) **Published Date:** 2022-12-15
- **LangChain:**
- **Documentation:** [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/v0.2/docs/versions/migrating_chains/constitutional_chain)
- **Documentation:** [docs/versions/migrating_chains/constitutional_chain](https://python.langchain.com/docs/versions/migrating_chains/constitutional_chain)
**Abstract:** As AI systems become more capable, we would like to enlist their help to
supervise other AIs. We experiment with methods for training a harmless AI
@ -792,7 +792,7 @@ publicly available at http://reasonwithpal.com/ .
- **arXiv id:** [2210.11934v2](http://arxiv.org/abs/2210.11934v2) **Published Date:** 2022-10-21
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
**Abstract:** We study hybrid search in text retrieval where lexical and semantic search
are fused together with the intuition that the two are complementary in how
@ -811,7 +811,7 @@ training examples to tune its only parameter to a target domain.
- **arXiv id:** [2210.03629v3](http://arxiv.org/abs/2210.03629v3) **Published Date:** 2022-10-06
- **LangChain:**
- **Documentation:** [docs/integrations/tools/ionic_shopping](https://python.langchain.com/v0.2/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/v0.2/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/integrations/tools/ionic_shopping](https://python.langchain.com/docs/integrations/tools/ionic_shopping), [docs/integrations/providers/cohere](https://python.langchain.com/docs/integrations/providers/cohere), [docs/concepts](https://python.langchain.com/docs/concepts)
- **API Reference:** [langchain...create_react_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.react.agent.create_react_agent.html#langchain.agents.react.agent.create_react_agent), [langchain...TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain)
**Abstract:** While large language models (LLMs) have demonstrated impressive capabilities
@ -843,7 +843,7 @@ Project site with code: https://react-lm.github.io
- **arXiv id:** [2209.10785v2](http://arxiv.org/abs/2209.10785v2) **Published Date:** 2022-09-22
- **LangChain:**
- **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/v0.2/docs/integrations/providers/activeloop_deeplake)
- **Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
**Abstract:** Traditional data lakes provide critical data infrastructure for analytical
workloads by enabling time travel, running SQL queries, ingesting data with
@ -868,7 +868,7 @@ TensorFlow, JAX, and integrate with numerous MLOps tools.
- **arXiv id:** [2205.13147v4](http://arxiv.org/abs/2205.13147v4) **Published Date:** 2022-05-26
- **LangChain:**
- **Documentation:** [docs/integrations/providers/snowflake](https://python.langchain.com/v0.2/docs/integrations/providers/snowflake)
- **Documentation:** [docs/integrations/providers/snowflake](https://python.langchain.com/docs/integrations/providers/snowflake)
**Abstract:** Learned representations are a central component in modern ML systems, serving
a multitude of downstream tasks. When training such representations, it is
@ -925,7 +925,7 @@ encoders, mine bitexts, and validate the bitexts by training NMT systems.
- **arXiv id:** [2204.00498v1](http://arxiv.org/abs/2204.00498v1) **Published Date:** 2022-03-15
- **LangChain:**
- **Documentation:** [docs/tutorials/sql_qa](https://python.langchain.com/v0.2/docs/tutorials/sql_qa)
- **Documentation:** [docs/tutorials/sql_qa](https://python.langchain.com/docs/tutorials/sql_qa)
- **API Reference:** [langchain_community...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
@ -971,7 +971,7 @@ reducing degenerate repetitions.
- **arXiv id:** [2112.01488v3](http://arxiv.org/abs/2112.01488v3) **Published Date:** 2021-12-02
- **LangChain:**
- **Documentation:** [docs/integrations/retrievers/ragatouille](https://python.langchain.com/v0.2/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/v0.2/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/v0.2/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/v0.2/docs/integrations/providers/dspy)
- **Documentation:** [docs/integrations/retrievers/ragatouille](https://python.langchain.com/docs/integrations/retrievers/ragatouille), [docs/integrations/providers/ragatouille](https://python.langchain.com/docs/integrations/providers/ragatouille), [docs/concepts](https://python.langchain.com/docs/concepts), [docs/integrations/providers/dspy](https://python.langchain.com/docs/integrations/providers/dspy)
**Abstract:** Neural information retrieval (IR) has greatly advanced search and other
knowledge-intensive language tasks. While many neural IR methods encode queries
@ -1022,7 +1022,7 @@ https://github.com/OpenAI/CLIP.
- **arXiv id:** [2005.14165v4](http://arxiv.org/abs/2005.14165v4) **Published Date:** 2020-05-28
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
**Abstract:** Recent work has demonstrated substantial gains on many NLP tasks and
benchmarks by pre-training on a large corpus of text followed by fine-tuning on
@ -1055,7 +1055,7 @@ and of GPT-3 in general.
- **arXiv id:** [2005.11401v4](http://arxiv.org/abs/2005.11401v4) **Published Date:** 2020-05-22
- **LangChain:**
- **Documentation:** [docs/concepts](https://python.langchain.com/v0.2/docs/concepts)
- **Documentation:** [docs/concepts](https://python.langchain.com/docs/concepts)
**Abstract:** Large pre-trained language models have been shown to store factual knowledge
in their parameters, and achieve state-of-the-art results when fine-tuned on
View File
@ -97,7 +97,7 @@ For guides on how to do specific tasks with LCEL, check out [the relevant how-to
### Runnable interface
<span data-heading-keywords="invoke,runnable"></span>
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
To make it as easy as possible to create custom chains, we've implemented a ["Runnable"](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) protocol. Many LangChain components implement the `Runnable` protocol, including chat models, LLMs, output parsers, retrievers, prompt templates, and more. There are also several useful primitives for working with runnables, which you can read about below.
This is a standard interface, which makes it easy to define custom chains as well as invoke them in a standard way.
The standard interface includes:
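To make the interface concrete, here is a minimal sketch (assumptions: `langchain-openai` is installed, `OPENAI_API_KEY` is set, and the model name is illustrative):

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Prompts and chat models are both Runnables; composing them with `|`
# yields another Runnable that exposes the same standard methods.
chain = ChatPromptTemplate.from_template("Tell me a joke about {topic}") | ChatOpenAI(
    model="gpt-4o-mini"
)

message = chain.invoke({"topic": "bears"})  # one input -> one output
messages = chain.batch([{"topic": "bears"}, {"topic": "otters"}])  # list in, list out
for chunk in chain.stream({"topic": "bears"}):  # chunks yielded as they are produced
    print(chunk.content, end="", flush=True)
```

Each method also has an async counterpart (`ainvoke`, `abatch`, `astream`).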
@ -380,17 +380,17 @@ LangChain has lots of different types of output parsers. This is a list of outpu
| Name | Supports Streaming | Has Format Instructions | Calls LLM | Input Type | Output Type | Description |
|-----------------|--------------------|-------------------------------|-----------|----------------------------------|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [JSON](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html#langchain_core.output_parsers.json.JsonOutputParser) | ✅ | ✅ | | `str` \| `Message` | JSON object | Returns a JSON object as specified. You can specify a Pydantic model and it will return JSON for that model. Probably the most reliable output parser for getting structured data that does NOT use function calling. |
| [XML](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html#langchain_core.output_parsers.xml.XMLOutputParser) | ✅ | ✅ | | `str` \| `Message` | `dict` | Returns a dictionary of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). |
| [CSV](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.list.CommaSeparatedListOutputParser.html#langchain_core.output_parsers.list.CommaSeparatedListOutputParser) | ✅ | ✅ | | `str` \| `Message` | `List[str]` | Returns a list of comma separated values. |
| [OutputFixing](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the error message and the bad output to an LLM and ask it to fix the output. |
| [RetryWithError](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.retry.RetryWithErrorOutputParser.html#langchain.output_parsers.retry.RetryWithErrorOutputParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the original inputs, the bad output, and the error message to an LLM and ask it to fix it. Compared to OutputFixingParser, this one also sends the original instructions. |
| [Pydantic](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html#langchain_core.output_parsers.pydantic.PydanticOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. |
| [YAML](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. Uses YAML to encode it. |
| [PandasDataFrame](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser.html#langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser) | | ✅ | | `str` \| `Message` | `dict` | Useful for doing operations with pandas DataFrames. |
| [Enum](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.enum.EnumOutputParser.html#langchain.output_parsers.enum.EnumOutputParser) | | ✅ | | `str` \| `Message` | `Enum` | Parses response into one of the provided enum values. |
| [Datetime](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) | | ✅ | | `str` \| `Message` | `datetime.datetime` | Parses response into a datetime string. |
| [Structured](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) | | ✅ | | `str` \| `Message` | `Dict[str, str]` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. |
| [JSON](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html#langchain_core.output_parsers.json.JsonOutputParser) | ✅ | ✅ | | `str` \| `Message` | JSON object | Returns a JSON object as specified. You can specify a Pydantic model and it will return JSON for that model. Probably the most reliable output parser for getting structured data that does NOT use function calling. |
| [XML](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html#langchain_core.output_parsers.xml.XMLOutputParser) | ✅ | ✅ | | `str` \| `Message` | `dict` | Returns a dictionary of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). |
| [CSV](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.list.CommaSeparatedListOutputParser.html#langchain_core.output_parsers.list.CommaSeparatedListOutputParser) | ✅ | ✅ | | `str` \| `Message` | `List[str]` | Returns a list of comma separated values. |
| [OutputFixing](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the error message and the bad output to an LLM and ask it to fix the output. |
| [RetryWithError](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.retry.RetryWithErrorOutputParser.html#langchain.output_parsers.retry.RetryWithErrorOutputParser) | | | ✅ | `str` \| `Message` | | Wraps another output parser. If that output parser errors, then this will pass the original inputs, the bad output, and the error message to an LLM and ask it to fix it. Compared to OutputFixingParser, this one also sends the original instructions. |
| [Pydantic](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html#langchain_core.output_parsers.pydantic.PydanticOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. |
| [YAML](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser) | | ✅ | | `str` \| `Message` | `pydantic.BaseModel` | Takes a user defined Pydantic model and returns data in that format. Uses YAML to encode it. |
| [PandasDataFrame](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser.html#langchain.output_parsers.pandas_dataframe.PandasDataFrameOutputParser) | | ✅ | | `str` \| `Message` | `dict` | Useful for doing operations with pandas DataFrames. |
| [Enum](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.enum.EnumOutputParser.html#langchain.output_parsers.enum.EnumOutputParser) | | ✅ | | `str` \| `Message` | `Enum` | Parses response into one of the provided enum values. |
| [Datetime](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser) | | ✅ | | `str` \| `Message` | `datetime.datetime` | Parses response into a datetime string. |
| [Structured](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser) | | ✅ | | `str` \| `Message` | `Dict[str, str]` | An output parser that returns structured information. It is less powerful than other output parsers since it only allows for fields to be strings. This can be useful when you are working with smaller LLMs. |
For specifics on how to use output parsers, see the [relevant how-to guides here](/docs/how_to/#output-parsers).
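These parsers share the same basic usage pattern. A minimal sketch with `PydanticOutputParser` (the `Person` schema here is hypothetical):

```python
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel, Field


class Person(BaseModel):
    name: str = Field(description="the person's name")
    age: int = Field(description="the person's age in years")


parser = PydanticOutputParser(pydantic_object=Person)

# The parser supplies format instructions for the prompt, then validates the
# model's text output into a `Person` instance.
prompt = PromptTemplate.from_template(
    "Answer the user query.\n{format_instructions}\n{query}\n"
).partial(format_instructions=parser.get_format_instructions())

# chain = prompt | llm | parser  # `llm` is any chat model or LLM
```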
@ -501,7 +501,7 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d
For some techniques, such as [indexing and retrieval with multiple vectors per document](/docs/how_to/multi_vector/) or
[caching embeddings](/docs/how_to/caching_embeddings/), having a form of key-value (KV) storage is helpful.
LangChain includes a [`BaseStore`](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.BaseStore.html) interface,
LangChain includes a [`BaseStore`](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.BaseStore.html) interface,
which allows for storage of arbitrary data. However, LangChain components that require KV-storage accept a
more specific `BaseStore[str, bytes]` instance that stores binary data (referred to as a `ByteStore`), and internally take care of
encoding and decoding data for their specific needs.
@ -510,7 +510,7 @@ This means that as a user, you only need to think about one type of store rather
#### Interface
All [`BaseStores`](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows
All [`BaseStores`](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.BaseStore.html) support the following interface. Note that the interface allows
for modifying **multiple** key-value pairs at once:
- `mget(key: Sequence[str]) -> List[Optional[bytes]]`: get the contents of multiple keys, returning `None` if the key does not exist
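The remaining methods (`mset`, `mdelete`, `yield_keys`) follow the same batch-oriented pattern. A minimal sketch using the in-memory `ByteStore` implementation:

```python
from langchain_core.stores import InMemoryByteStore

store = InMemoryByteStore()

# Every operation acts on multiple key-value pairs at once.
store.mset([("doc-1", b"first payload"), ("doc-2", b"second payload")])
print(store.mget(["doc-1", "missing"]))  # [b'first payload', None]
store.mdelete(["doc-1"])
print(list(store.yield_keys()))  # ['doc-2']
```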
@ -708,10 +708,10 @@ You can subscribe to these events by using the `callbacks` argument available th
Callback handlers can either be `sync` or `async`:
* Sync callback handlers implement the [BaseCallbackHandler](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
* Async callback handlers implement the [AsyncCallbackHandler](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.
* Sync callback handlers implement the [BaseCallbackHandler](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface.
* Async callback handlers implement the [AsyncCallbackHandler](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface.
During run-time LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html) which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.
During run-time, LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html)) which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered.
#### Passing callbacks
@ -779,7 +779,7 @@ For models (or other components) that don't support streaming natively, this ite
you could still use the same general pattern when calling them. Using `.stream()` will also automatically call the model in streaming mode
without the need to provide additional config.
The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html).
The type of each outputted chunk depends on the type of component - for example, chat models yield [`AIMessageChunks`](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html).
Because this method is part of [LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel),
you can handle formatting differences from different outputs using an [output parser](/docs/concepts/#output-parsers) to transform
each yielded chunk.
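For instance, a minimal sketch of streaming from a chat model directly (the model name is illustrative); message chunks support `+`, so they can be accumulated into a full message as they arrive:

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o-mini")

full = None
for chunk in model.stream("Why is the sky blue?"):
    full = chunk if full is None else full + chunk  # AIMessageChunks are additive
    print(chunk.content, end="", flush=True)
```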
@ -827,10 +827,10 @@ including a table listing available events.
#### Callbacks
The lowest level way to stream outputs from LLMs in LangChain is via the [callbacks](/docs/concepts/#callbacks) system. You can pass a
callback handler that handles the [`on_llm_new_token`](https://python.langchain.com/v0.2/api_reference/langchain/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any
callback handler that handles the [`on_llm_new_token`](https://python.langchain.com/api_reference/langchain/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_new_token) event into LangChain components. When that component is invoked, any
[LLM](/docs/concepts/#llms) or [chat model](/docs/concepts/#chat-models) contained in the component calls
the callback with the generated token. Within the callback, you could pipe the tokens into some other destination, e.g. an HTTP response.
You can also handle the [`on_llm_end`](https://python.langchain.com/v0.2/api_reference/langchain/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup.
You can also handle the [`on_llm_end`](https://python.langchain.com/api_reference/langchain/callbacks/langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.html#langchain.callbacks.streaming_aiter.AsyncIteratorCallbackHandler.on_llm_end) event to perform any necessary cleanup.
You can see [this how-to section](/docs/how_to/#callbacks) for more specifics on using callbacks.
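As a sketch of the token-callback pattern described above, here is a handler that forwards tokens to stdout (a real application might write them to an HTTP response instead). Note that token events only fire when the underlying model is actually streaming, e.g. when called via `.stream()` or constructed with `streaming=True`:

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class StdoutTokenHandler(BaseCallbackHandler):
    """Minimal sync handler: print each generated token as it arrives."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        print(token, end="", flush=True)

    def on_llm_end(self, response: Any, **kwargs: Any) -> None:
        print()  # perform any cleanup once generation finishes


# llm.invoke("Hello!", config={"callbacks": [StdoutTokenHandler()]})  # sketch
```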
@ -945,7 +945,7 @@ Here's an example:
```python
from typing import Optional
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
class Joke(BaseModel):
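    # Illustrative continuation (not from the original source); plausible fields:
    setup: str = Field(description="question that sets up the joke")
    punchline: str = Field(description="answer that resolves the joke")

# Sketch: many chat models can then return `Joke` instances directly via
# structured_llm = llm.with_structured_output(Joke)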
@ -1062,7 +1062,7 @@ a `tool_calls` field containing `args` that match the desired shape.
There are several acceptable formats you can use to bind tools to a model in LangChain. Here's one example:
```python
from langchain_core.pydantic_v1 import BaseModel, Field
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI
class ResponseFormatter(BaseModel):
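    # Illustrative continuation (not from the original source); plausible fields:
    answer: str = Field(description="the answer to the user's question")
    followup_question: str = Field(description="a follow-up question the user could ask")

# Sketch: Pydantic classes are one accepted format for binding tools, e.g.
# model = ChatOpenAI(model="gpt-4o-mini").bind_tools([ResponseFormatter])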
View File
@ -12,7 +12,7 @@ It covers a wide array of topics, including tutorials, use cases, integrations,
and more, offering extensive guidance on building with LangChain.
The content for this documentation lives in the `/docs` directory of the monorepo.
2. In-code Documentation: This is documentation of the codebase itself, which is also
used to generate the externally facing [API Reference](https://python.langchain.com/v0.2/api_reference/langchain/index.html).
used to generate the externally facing [API Reference](https://python.langchain.com/api_reference/langchain/index.html).
The content for the API reference is autogenerated by scanning the docstrings in the codebase. For this reason we ask that
developers document their code well.
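For illustration, a minimal sketch of the Google-style docstrings that the API reference is generated from (the function itself is hypothetical):

```python
def truncate(text: str, max_len: int) -> str:
    """Truncate text to a maximum length.

    Args:
        text: The input string.
        max_len: Maximum number of characters to keep.

    Returns:
        The input truncated to at most ``max_len`` characters.
    """
    return text[:max_len]
```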
View File
@ -50,7 +50,7 @@ There are other files in the root directory level, but their presence should be
## Documentation
The `/docs` directory contains the content for the documentation that is shown
at https://python.langchain.com/ and the associated API Reference https://python.langchain.com/v0.2/api_reference/langchain/index.html.
at https://python.langchain.com/ and the associated API Reference https://python.langchain.com/api_reference/langchain/index.html.
See the [documentation](/docs/contributing/documentation/) guidelines to learn how to contribute to the documentation.
View File
@ -13,7 +13,7 @@
"# How to split by HTML header \n",
"## Description and motivation\n",
"\n",
"[HTMLHeaderTextSplitter](https://python.langchain.com/v0.2/api_reference/text_splitters/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a \"structure-aware\" chunker that splits text at the HTML element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline.\n",
"[HTMLHeaderTextSplitter](https://python.langchain.com/api_reference/text_splitters/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a \"structure-aware\" chunker that splits text at the HTML element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline.\n",
"\n",
"It is analogous to the [MarkdownHeaderTextSplitter](/docs/how_to/markdown_header_metadata_splitter) for markdown files.\n",
"\n",
View File
@ -9,7 +9,7 @@
"\n",
"Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on a distance metric. But, retrieval may produce different results with subtle changes in query wording, or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n",
"\n",
"The [MultiQueryRetriever](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` can mitigate some of the limitations of the distance-based retrieval and get a richer set of results.\n",
"The [MultiQueryRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` can mitigate some of the limitations of the distance-based retrieval and get a richer set of results.\n",
"\n",
"Let's build a vectorstore using the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng from the [RAG tutorial](/docs/tutorials/rag):"
]
@ -18,8 +18,23 @@
"cell_type": "code",
"execution_count": 1,
"id": "994d6c74",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:00.190093Z",
"iopub.status.busy": "2024-09-10T20:08:00.189665Z",
"iopub.status.idle": "2024-09-10T20:08:05.438015Z",
"shell.execute_reply": "2024-09-10T20:08:05.437685Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
]
}
],
"source": [
"# Build a sample vectorDB\n",
"from langchain_chroma import Chroma\n",
@ -54,7 +69,14 @@
"cell_type": "code",
"execution_count": 2,
"id": "edbca101",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.439930Z",
"iopub.status.busy": "2024-09-10T20:08:05.439810Z",
"iopub.status.idle": "2024-09-10T20:08:05.553766Z",
"shell.execute_reply": "2024-09-10T20:08:05.553520Z"
}
},
"outputs": [],
"source": [
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
@ -71,7 +93,14 @@
"cell_type": "code",
"execution_count": 3,
"id": "9e6d3b69",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.555359Z",
"iopub.status.busy": "2024-09-10T20:08:05.555262Z",
"iopub.status.idle": "2024-09-10T20:08:05.557046Z",
"shell.execute_reply": "2024-09-10T20:08:05.556825Z"
}
},
"outputs": [],
"source": [
"# Set logging for the queries\n",
@ -85,13 +114,20 @@
"cell_type": "code",
"execution_count": 4,
"id": "bc93dc2b-9407-48b0-9f9a-338247e7eb69",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:05.558176Z",
"iopub.status.busy": "2024-09-10T20:08:05.558100Z",
"iopub.status.idle": "2024-09-10T20:08:07.250342Z",
"shell.execute_reply": "2024-09-10T20:08:07.249711Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. What strategies are commonly used for Task Decomposition?', '3. What are the various techniques for breaking down tasks in Task Decomposition?']\n"
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. How can Task Decomposition be achieved through different methods?', '2. What strategies are commonly used for Task Decomposition?', '3. What are the various ways to break down tasks in Task Decomposition?']\n"
]
},
{
@ -125,9 +161,9 @@
"source": [
"#### Supplying your own prompt\n",
"\n",
"Under the hood, `MultiQueryRetriever` generates queries using a specific [prompt](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html). To customize this prompt:\n",
"Under the hood, `MultiQueryRetriever` generates queries using a specific [prompt](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html). To customize this prompt:\n",
"\n",
"1. Make a [PromptTemplate](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.prompt.PromptTemplate.html) with an input variable for the question;\n",
"1. Make a [PromptTemplate](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.prompt.PromptTemplate.html) with an input variable for the question;\n",
"2. Implement an [output parser](/docs/concepts#output-parsers) like the one below to split the result into a list of queries.\n",
"\n",
"The prompt and output parser together must support the generation of a list of queries."
@ -137,14 +173,21 @@
"cell_type": "code",
"execution_count": 5,
"id": "d9afb0ca",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:07.253875Z",
"iopub.status.busy": "2024-09-10T20:08:07.253600Z",
"iopub.status.idle": "2024-09-10T20:08:07.277848Z",
"shell.execute_reply": "2024-09-10T20:08:07.277487Z"
}
},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain_core.output_parsers import BaseOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Output parser will split the LLM result into a list of queries\n",
@ -180,13 +223,20 @@
"cell_type": "code",
"execution_count": 6,
"id": "59c75c56-dbd7-4887-b9ba-0b5b21069f51",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:08:07.280001Z",
"iopub.status.busy": "2024-09-10T20:08:07.279861Z",
"iopub.status.idle": "2024-09-10T20:08:09.579525Z",
"shell.execute_reply": "2024-09-10T20:08:09.578837Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer about regression?', '4. In what way is regression covered in the course?', '5. What are the teachings of the course regarding regression?']\n"
"INFO:langchain.retrievers.multi_query:Generated queries: ['1. Can you provide insights on regression from the course material?', '2. How is regression discussed in the course content?', '3. What information does the course offer regarding regression?', '4. In what way is regression covered in the course?', \"5. What are the course's teachings on regression?\"]\n"
]
},
{
@ -228,7 +278,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,
View File
@ -7,7 +7,7 @@
"source": [
"# How to add scores to retriever results\n",
"\n",
"Retrievers will return sequences of [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html) objects, which by default include no information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents:\n",
"Retrievers will return sequences of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects, which by default include no information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents:\n",
"1. From [vectorstore retrievers](/docs/how_to/vectorstore_retriever);\n",
"2. From higher-order LangChain retrievers, such as [SelfQueryRetriever](/docs/how_to/self_query) or [MultiVectorRetriever](/docs/how_to/multi_vector).\n",
"\n",
@ -15,7 +15,7 @@
"\n",
"## Create vector store\n",
"\n",
"First we populate a vector store with some data. We will use a [PineconeVectorStore](https://python.langchain.com/v0.2/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html), but this guide is compatible with any LangChain vector store that implements a `.similarity_search_with_score` method."
"First we populate a vector store with some data. We will use a [PineconeVectorStore](https://python.langchain.com/api_reference/pinecone/vectorstores/langchain_pinecone.vectorstores.PineconeVectorStore.html), but this guide is compatible with any LangChain vector store that implements a `.similarity_search_with_score` method."
]
},
{
@ -263,7 +263,7 @@
"\n",
"To propagate similarity scores through this retriever, we can again subclass `MultiVectorRetriever` and override a method. This time we will override `_get_relevant_documents`.\n",
"\n",
"First, we prepare some fake data. We generate fake \"whole documents\" and store them in a document store; here we will use a simple [InMemoryStore](https://python.langchain.com/v0.2/api_reference/core/stores/langchain_core.stores.InMemoryBaseStore.html)."
"First, we prepare some fake data. We generate fake \"whole documents\" and store them in a document store; here we will use a simple [InMemoryStore](https://python.langchain.com/api_reference/core/stores/langchain_core.stores.InMemoryBaseStore.html)."
]
},
{
View File
@ -27,7 +27,7 @@
"\n",
":::\n",
"\n",
"An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain_core.runnables.passthrough.RunnablePassthrough.assign) static method takes an input value and adds the extra arguments passed to the assign function.\n",
"An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain_core.runnables.passthrough.RunnablePassthrough.assign) static method takes an input value and adds the extra arguments passed to the assign function.\n",
"\n",
"This is useful in the common [LangChain Expression Language](/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step.\n",
"\n",
@ -45,7 +45,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
View File
@ -27,7 +27,7 @@
"\n",
":::\n",
"\n",
"Sometimes we want to invoke a [`Runnable`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) within a [RunnableSequence](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://python.langchain.com/v0.2/api_reference/langchain_core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind) method to set these arguments ahead of time.\n",
"Sometimes we want to invoke a [`Runnable`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) within a [RunnableSequence](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://python.langchain.com/api_reference/langchain_core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind) method to set these arguments ahead of time.\n",
"\n",
"## Binding stop sequences\n",
"\n",
@ -49,7 +49,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@ -183,7 +184,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_z0OU2CytqENVrRTI6T8DkI3u', 'function': {'arguments': '{\"location\": \"San Francisco, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw', 'function': {'arguments': '{\"location\": \"New York, NY\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH', 'function': {'arguments': '{\"location\": \"Los Angeles, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 84, 'prompt_tokens': 85, 'total_tokens': 169}, 'model_name': 'gpt-3.5-turbo-1106', 'system_fingerprint': 'fp_77a673219d', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d57ad5fa-b52a-4822-bc3e-74f838697e18-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco, CA', 'unit': 'celsius'}, 'id': 'call_z0OU2CytqENVrRTI6T8DkI3u'}, {'name': 'get_current_weather', 'args': {'location': 'New York, NY', 'unit': 'celsius'}, 'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw'}, {'name': 'get_current_weather', 'args': {'location': 'Los Angeles, CA', 'unit': 'celsius'}, 'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH'}])"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_z0OU2CytqENVrRTI6T8DkI3u', 'function': {'arguments': '{\"location\": \"San Francisco, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw', 'function': {'arguments': '{\"location\": \"New York, NY\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}, {'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH', 'function': {'arguments': '{\"location\": \"Los Angeles, CA\", \"unit\": \"celsius\"}', 'name': 'get_current_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 84, 'prompt_tokens': 85, 'total_tokens': 169}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': 'fp_77a673219d', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d57ad5fa-b52a-4822-bc3e-74f838697e18-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco, CA', 'unit': 'celsius'}, 'id': 'call_z0OU2CytqENVrRTI6T8DkI3u'}, {'name': 'get_current_weather', 'args': {'location': 'New York, NY', 'unit': 'celsius'}, 'id': 'call_ft96IJBh0cMKkQWrZjNg4bsw'}, {'name': 'get_current_weather', 'args': {'location': 'Los Angeles, CA', 'unit': 'celsius'}, 'id': 'call_tfbtGgCLmuBuWgZLvpPwvUMH'}])"
]
},
"execution_count": 5,
@ -192,7 +193,7 @@
}
],
"source": [
"model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\").bind(tools=tools)\n",
"model = ChatOpenAI(model=\"gpt-4o-mini\").bind(tools=tools)\n",
"model.invoke(\"What's the weather in SF, NYC and LA?\")"
]
},
View File
@ -14,7 +14,7 @@
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
":::\n",
"\n",
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event.\n",
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event.\n",
"\n",
"\n",
":::{.callout-warning}\n",
View File
@ -17,7 +17,7 @@
"\n",
":::\n",
"\n",
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
"\n",
":::{.callout-important}\n",
"\n",
View File
@ -15,7 +15,7 @@
"\n",
":::\n",
"\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"\n",
"This prevents us from having to manually attach the handlers to each individual nested object. Here's an example:"
]
View File
@ -28,7 +28,7 @@
"\n",
"To obtain the string content directly, use `.split_text`.\n",
"\n",
"To create LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html) objects (e.g., for use in downstream tasks), use `.create_documents`."
"To create LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects (e.g., for use in downstream tasks), use `.create_documents`."
]
},
{
View File
@ -50,7 +50,8 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"\n",
"llm = ChatOpenAI()"
]
View File
@ -11,7 +11,7 @@
"\n",
":::tip Supported models\n",
"\n",
"See the [init_chat_model()](https://python.langchain.com/v0.2/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n",
"See the [init_chat_model()](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n",
"\n",
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
"\n",
@ -26,10 +26,32 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "165b0de6-9ae3-4e3d-aa98-4fc8a97c4a06",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:32.858670Z",
"iopub.status.busy": "2024-09-10T20:22:32.858278Z",
"iopub.status.idle": "2024-09-10T20:22:33.009452Z",
"shell.execute_reply": "2024-09-10T20:22:33.007022Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zsh:1: 0.2.8 not found\r\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
]
@ -44,19 +66,48 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "79e14913-803c-4382-9009-5c6af3d75d35",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:33.015729Z",
"iopub.status.busy": "2024-09-10T20:22:33.015241Z",
"iopub.status.idle": "2024-09-10T20:22:39.391716Z",
"shell.execute_reply": "2024-09-10T20:22:39.390438Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95293/571506279.py:4: LangChainBetaWarning: The function `init_chat_model` is in beta. It is actively being worked on, so the API may change.\n",
" gpt_4o = init_chat_model(\"gpt-4o\", model_provider=\"openai\", temperature=0)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT-4o: I'm an AI created by OpenAI, and I don't have a personal name. You can call me Assistant! How can I help you today?\n",
"\n",
"Claude Opus: My name is Claude. It's nice to meet you!\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Gemini 1.5: I am a large language model, trained by Google. \n",
"\n",
"Gemini 1.5: I am a large language model, trained by Google. I do not have a name. \n",
"I don't have a name like a person does. You can call me Bard if you like! 😊 \n",
"\n",
"\n"
]
@ -89,14 +140,21 @@
"source": [
"## Inferring model provider\n",
"\n",
"For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://python.langchain.com/v0.2/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`."
"For common and distinct model names `init_chat_model()` will attempt to infer the model provider. See the [API reference](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) for a full list of inference behavior. E.g. any model that starts with `gpt-3...` or `gpt-4...` will be inferred as using model provider `openai`."
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "0378ccc6-95bc-4d50-be50-fccc193f0a71",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:39.396908Z",
"iopub.status.busy": "2024-09-10T20:22:39.396563Z",
"iopub.status.idle": "2024-09-10T20:22:39.444959Z",
"shell.execute_reply": "2024-09-10T20:22:39.444646Z"
}
},
"outputs": [],
"source": [
"gpt_4o = init_chat_model(\"gpt-4o\", temperature=0)\n",
@ -116,17 +174,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:39.446901Z",
"iopub.status.busy": "2024-09-10T20:22:39.446773Z",
"iopub.status.idle": "2024-09-10T20:22:40.301906Z",
"shell.execute_reply": "2024-09-10T20:22:40.300918Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_d576307f90', 'finish_reason': 'stop', 'logprobs': None}, id='run-5428ab5c-b5c0-46de-9946-5d4ca40dbdc8-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-b41df187-4627-490d-af3c-1c96282d3eb0-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
]
},
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@ -141,17 +206,24 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:40.316030Z",
"iopub.status.busy": "2024-09-10T20:22:40.315628Z",
"iopub.status.idle": "2024-09-10T20:22:41.199134Z",
"shell.execute_reply": "2024-09-10T20:22:41.198173Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_012XvotUJ3kGLXJUWKBVxJUi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-1ad1eefe-f1c6-4244-8bc6-90e2cb7ee554-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01Fx9P74A7syoFkwE73CdMMY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-a0fd2bbd-3b7e-46bf-8d69-a48c7e60b03c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@ -174,17 +246,24 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 6,
"id": "814a2289-d0db-401e-b555-d5116112b413",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:41.203346Z",
"iopub.status.busy": "2024-09-10T20:22:41.203004Z",
"iopub.status.idle": "2024-09-10T20:22:41.891450Z",
"shell.execute_reply": "2024-09-10T20:22:41.890539Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_ce0793330f', 'finish_reason': 'stop', 'logprobs': None}, id='run-3923e328-7715-4cd6-b215-98e4b6bf7c9d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
"AIMessage(content=\"I'm an AI created by OpenAI, and I don't have a personal name. How can I assist you today?\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 23, 'prompt_tokens': 11, 'total_tokens': 34}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_25624ae3a5', 'finish_reason': 'stop', 'logprobs': None}, id='run-3380f977-4b89-4f44-bc02-b64043b3166f-0', usage_metadata={'input_tokens': 11, 'output_tokens': 23, 'total_tokens': 34})"
]
},
"execution_count": 9,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@ -202,17 +281,24 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 7,
"id": "6c8755ba-c001-4f5a-a497-be3f1db83244",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:41.896413Z",
"iopub.status.busy": "2024-09-10T20:22:41.895967Z",
"iopub.status.idle": "2024-09-10T20:22:42.767565Z",
"shell.execute_reply": "2024-09-10T20:22:42.766619Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_01RyYR64DoMPNCfHeNnroMXm', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-22446159-3723-43e6-88df-b84797e7751d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", additional_kwargs={}, response_metadata={'id': 'msg_01EFKSWpmsn2PSYPQa4cNHWb', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-3c58f47c-41b9-4e56-92e7-fb9602e3787c-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 10,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@ -242,28 +328,37 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "067dabee-1050-4110-ae24-c48eba01e13b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:42.771941Z",
"iopub.status.busy": "2024-09-10T20:22:42.771606Z",
"iopub.status.idle": "2024-09-10T20:22:43.909206Z",
"shell.execute_reply": "2024-09-10T20:22:43.908496Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'call_sYT3PFMufHGWJD32Hi2CTNUP'},\n",
" 'id': 'call_Ga9m8FAArIyEjItHmztPYA22',\n",
" 'type': 'tool_call'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York, NY'},\n",
" 'id': 'call_j1qjhxRnD3ffQmRyqjlI1Lnk'}]"
" 'id': 'call_jh2dEvBaAHRaw5JUDthOs7rt',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 7,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
@ -288,22 +383,31 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "e57dfe9f-cd24-4e37-9ce9-ccf8daf78f89",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:43.912746Z",
"iopub.status.busy": "2024-09-10T20:22:43.912447Z",
"iopub.status.idle": "2024-09-10T20:22:46.437049Z",
"shell.execute_reply": "2024-09-10T20:22:46.436093Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'toolu_01CxEHxKtVbLBrvzFS7GQ5xR'},\n",
" 'id': 'toolu_01JMufPf4F4t2zLj7miFeqXp',\n",
" 'type': 'tool_call'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York City, NY'},\n",
" 'id': 'toolu_013A79qt5toWSsKunFBDZd5S'}]"
" 'id': 'toolu_01RQBHcE8kEEbYTuuS8WqY1u',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}

View File

@ -18,7 +18,7 @@
"# How to stream chat model responses\n",
"\n",
"\n",
"All [chat models](https://python.langchain.com/v0.2/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) implement the [Runnable interface](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable), which comes with a **default** implementations of standard runnable methods (i.e. `ainvoke`, `batch`, `abatch`, `stream`, `astream`, `astream_events`).\n",
"All [chat models](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) implement the [Runnable interface](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable), which comes with a **default** implementations of standard runnable methods (i.e. `ainvoke`, `batch`, `abatch`, `stream`, `astream`, `astream_events`).\n",
"\n",
"The **default** streaming implementation provides an`Iterator` (or `AsyncIterator` for asynchronous streaming) that yields a single value: the final output from the underlying chat model provider.\n",
"\n",
@ -120,7 +120,7 @@
"source": [
"## Astream events\n",
"\n",
"Chat models also support the standard [astream events](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events) method.\n",
"Chat models also support the standard [astream events](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events) method.\n",
"\n",
"This method is useful if you're streaming output from a larger LLM application that contains multiple steps (e.g., an LLM chain composed of a prompt, llm and parser)."
]
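
For quick reference, here is a minimal sketch of the synchronous `stream` method described above -- the model name is an illustrative choice, and the key assumption is that `OPENAI_API_KEY` is set in the environment:

```python
# Minimal sketch: consume chunks from a chat model as they arrive.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")  # arbitrary model choice

# Each item is an AIMessageChunk; models without native streaming
# fall back to yielding a single chunk containing the final output.
for chunk in llm.stream("Write me a haiku about the sea"):
    print(chunk.content, end="|", flush=True)
```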

View File

@ -42,7 +42,7 @@
"\n",
"A number of model providers return token usage information as part of the chat generation response. When available, this information will be included on the `AIMessage` objects produced by the corresponding model.\n",
"\n",
"LangChain `AIMessage` objects include a [usage_metadata](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`).\n",
"LangChain `AIMessage` objects include a [usage_metadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.usage_metadata) attribute. When populated, this attribute will be a [UsageMetadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.UsageMetadata.html) dictionary with standard keys (e.g., `\"input_tokens\"` and `\"output_tokens\"`).\n",
"\n",
"Examples:\n",
"\n",
@ -71,7 +71,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"openai_response = llm.invoke(\"hello\")\n",
"openai_response.usage_metadata"
]
@ -118,7 +118,7 @@
"source": [
"### Using AIMessage.response_metadata\n",
"\n",
"Metadata from the model response is also included in the AIMessage [response_metadata](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts:"
"Metadata from the model response is also included in the AIMessage [response_metadata](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.response_metadata) attribute. These data are typically not standardized. Note that different providers adopt different conventions for representing token counts:"
]
},
{
@ -153,7 +153,7 @@
"\n",
"#### OpenAI\n",
"\n",
"For example, OpenAI will return a message [chunk](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"For example, OpenAI will return a message [chunk](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"\n",
"```{=mdx}\n",
":::note\n",
@ -182,13 +182,13 @@
"content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-4o-mini'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
]
}
],
"source": [
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"aggregate = None\n",
"for chunk in llm.stream(\"hello\", stream_usage=True):\n",
@ -252,7 +252,7 @@
"content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-4o-mini'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
]
}
],
@ -289,7 +289,7 @@
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Joke(BaseModel):\n",
@ -300,7 +300,7 @@
"\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" model=\"gpt-4o-mini\",\n",
" stream_usage=True,\n",
")\n",
"# Under the hood, .with_structured_output binds tools to the\n",
@ -362,7 +362,7 @@
"from langchain_community.callbacks.manager import get_openai_callback\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" model=\"gpt-4o-mini\",\n",
" temperature=0,\n",
" stream_usage=True,\n",
")\n",

View File

@ -77,7 +77,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")"
"chat = ChatOpenAI(model=\"gpt-4o-mini\")"
]
},
{
@ -140,7 +140,7 @@
"\n",
"## Chat history\n",
"\n",
"It's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://python.langchain.com/v0.2/api_reference/langchain/index.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. LangChain integrates with many providers - you can see a [list of integrations here](/docs/integrations/memory) - but for this demo we will use an ephemeral demo class.\n",
"It's perfectly fine to store and pass messages directly as an array, but we can use LangChain's built-in [message history class](https://python.langchain.com/api_reference/langchain/index.html#module-langchain.memory) to store and load messages as well. Instances of this class are responsible for storing and loading chat messages from persistent storage. LangChain integrates with many providers - you can see a [list of integrations here](/docs/integrations/memory) - but for this demo we will use an ephemeral demo class.\n",
"\n",
"Here's an example of the API:"
]
@ -191,7 +191,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
"AIMessage(content='You just asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 61, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5cbb21c2-9c30-4031-8ea8-bfc497989535-0', usage_metadata={'input_tokens': 61, 'output_tokens': 18, 'total_tokens': 79})"
]
},
"execution_count": 5,
@ -312,7 +312,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
"AIMessage(content='\"J\\'adore la programmation.\"', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 39, 'total_tokens': 48}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-648b0822-b0bb-47a2-8e7d-7d34744be8f2-0', usage_metadata={'input_tokens': 39, 'output_tokens': 9, 'total_tokens': 48})"
]
},
"execution_count": 8,
@ -342,7 +342,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
"AIMessage(content='You asked me to translate the sentence \"I love programming\" from English to French.', response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 63, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5950435c-1dc2-43a6-836f-f989fd62c95e-0', usage_metadata={'input_tokens': 63, 'output_tokens': 17, 'total_tokens': 80})"
]
},
"execution_count": 9,
@ -421,7 +421,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
"AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72})"
]
},
"execution_count": 22,
@ -501,7 +501,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
"AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})"
]
},
"execution_count": 24,
@ -529,9 +529,9 @@
" HumanMessage(content='How are you today?'),\n",
" AIMessage(content='Fine thanks!'),\n",
" HumanMessage(content=\"What's my name?\"),\n",
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
" AIMessage(content='Your name is Nemo.', response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 66, 'total_tokens': 72}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-f8aabef8-631a-4238-a39b-701e881fbe47-0', usage_metadata={'input_tokens': 66, 'output_tokens': 6, 'total_tokens': 72}),\n",
" HumanMessage(content='Where does P. Sherman live?'),\n",
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
" AIMessage(content='P. Sherman is a fictional character from the animated movie \"Finding Nemo\" who lives at 42 Wallaby Way, Sydney.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 53, 'total_tokens': 80}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5642ef3a-fdbe-43cf-a575-d1785976a1b9-0', usage_metadata={'input_tokens': 53, 'output_tokens': 27, 'total_tokens': 80})]"
]
},
"execution_count": 25,
@ -565,7 +565,7 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
"AIMessage(content=\"I'm sorry, but I don't have access to your personal information, so I don't know your name. How else may I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 31, 'prompt_tokens': 74, 'total_tokens': 105}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-0ab03495-1f7c-4151-9070-56d2d1c565ff-0', usage_metadata={'input_tokens': 74, 'output_tokens': 31, 'total_tokens': 105})"
]
},
"execution_count": 27,

View File

@ -71,7 +71,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0.2)"
"chat = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.2)"
]
},
{

View File

@ -70,7 +70,7 @@
"\n",
"# Choose the LLM that will drive the agent\n",
"# Only certain models support this\n",
"chat = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)"
"chat = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{

View File

@ -7,7 +7,7 @@
"source": [
"# How to split code\n",
"\n",
"[RecursiveCharacterTextSplitter](https://python.langchain.com/v0.2/api_reference/text_splitters/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language.\n",
"[RecursiveCharacterTextSplitter](https://python.langchain.com/api_reference/text_splitters/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language.\n",
"\n",
"Supported languages are stored in the `langchain_text_splitters.Language` enum. They include:\n",
"\n",

View File

@ -58,7 +58,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@ -99,7 +100,7 @@
"id": "b0f74589",
"metadata": {},
"source": [
"Above, we defined `temperature` as a [`ConfigurableField`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.utils.ConfigurableField.html#langchain_core.runnables.utils.ConfigurableField) that we can set at runtime. To do so, we use the [`with_config`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method like this:"
"Above, we defined `temperature` as a [`ConfigurableField`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.utils.ConfigurableField.html#langchain_core.runnables.utils.ConfigurableField) that we can set at runtime. To do so, we use the [`with_config`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method like this:"
]
},
{
@ -281,7 +282,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
]
},
{

View File

@ -227,7 +227,7 @@
"source": [
"### `LLMListwiseRerank`\n",
"\n",
"[LLMListwiseRerank](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
"[LLMListwiseRerank](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
"\n",
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
]
@ -258,7 +258,7 @@
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"\n",
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
"compression_retriever = ContextualCompressionRetriever(\n",

View File

@ -42,13 +42,13 @@
"source": [
"LangChain [tools](/docs/concepts#tools) are interfaces that an agent, chain, or chat model can use to interact with the world. See [here](/docs/how_to/#tools) for how-to guides covering tool-calling, built-in tools, custom tools, and more information.\n",
"\n",
"LangChain tools-- instances of [BaseTool](https://python.langchain.com/v0.2/api_reference/core/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
"LangChain tools-- instances of [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
"\n",
"- Their inputs are constrained to be serializable, specifically strings and Python `dict` objects;\n",
"- They contain names and descriptions indicating how and when they should be used;\n",
"- They may contain a detailed [args_schema](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
"- They may contain a detailed [args_schema](https://python.langchain.com/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
"\n",
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
]
},
{
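
As a rough sketch of the conversion this page builds toward -- the chain, tool name, and argument types below are placeholders, not the notebook's own example (note that `as_tool` is in beta, as the warning later in this notebook shows):

```python
# Minimal sketch: turn a dict-input Runnable into a tool with a name,
# description, and explicit argument types for the model.
from typing import List

from langchain_core.runnables import RunnableLambda

runnable = RunnableLambda(lambda x: str(x["a"] * max(x["b"])))

as_tool = runnable.as_tool(
    name="scale_by_max",
    description="Multiply a by the maximum of b.",
    arg_types={"a": int, "b": List[int]},
)
print(as_tool.args)  # schema surfaced to the model
print(as_tool.invoke({"a": 3, "b": [1, 2]}))  # -> '6'
```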
@ -180,7 +180,7 @@
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
"metadata": {},
"source": [
"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://python.langchain.com/v0.2/api_reference/core/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
]
},
{
@ -190,7 +190,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class GSchema(BaseModel):\n",
@ -285,7 +285,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
@ -331,7 +331,7 @@
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
"metadata": {},
"source": [
"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/v0.2/docs/tutorials/agents/) and provide it the tool:"
"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/docs/tutorials/agents/) and provide it the tool:"
]
},
{
@ -362,11 +362,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"----\n"
]
}
@ -497,11 +497,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"----\n"
]
}

View File

@ -16,7 +16,7 @@
"\n",
"LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n",
"\n",
"To create a custom callback handler, we need to determine the [event(s)](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
"To create a custom callback handler, we need to determine the [event(s)](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
"\n",
"In the example below, we'll implement streaming with a custom handler.\n",
"\n",
@ -107,7 +107,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see [this reference page](https://python.langchain.com/v0.2/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
"You can see [this reference page](https://python.langchain.com/api_reference/core/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
"\n",
"## Next steps\n",
"\n",

View File

@ -16,7 +16,7 @@
"\n",
"In this guide, we'll learn how to create a custom chat model using LangChain abstractions.\n",
"\n",
"Wrapping your LLM with the standard [`BaseChatModel`](https://python.langchain.com/v0.2/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allow you to use your LLM in existing LangChain programs with minimal code modifications!\n",
"Wrapping your LLM with the standard [`BaseChatModel`](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allow you to use your LLM in existing LangChain programs with minimal code modifications!\n",
"\n",
"As an bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support, the `astream_events` API, etc.\n",
"\n",
@ -503,7 +503,7 @@
"\n",
"Documentation:\n",
"\n",
"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://python.langchain.com/v0.2/api_reference/langchain/index.html).\n",
"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://python.langchain.com/api_reference/langchain/index.html).\n",
"* The class doc-string for the model contains a link to the model API if the model is powered by a service.\n",
"\n",
"Tests:\n",

View File

@ -402,7 +402,7 @@
"\n",
"Documentation:\n",
"\n",
"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://python.langchain.com/v0.2/api_reference/langchain/index.html).\n",
"* The model contains doc-strings for all initialization arguments, as these will be surfaced in the [APIReference](https://python.langchain.com/api_reference/langchain/index.html).\n",
"* The class doc-string for the model contains a link to the model API if the model is powered by a service.\n",
"\n",
"Tests:\n",

View File

@ -270,7 +270,7 @@
"\n",
"Documentation:\n",
"\n",
"* The retriever contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://python.langchain.com/v0.2/api_reference/langchain/index.html).\n",
"* The retriever contains doc-strings for all initialization arguments, as these will be surfaced in the [API Reference](https://python.langchain.com/api_reference/langchain/index.html).\n",
"* The class doc-string for the model contains a link to any relevant APIs used for the retriever (e.g., if the retriever is retrieving from wikipedia, it'll be good to link to the wikipedia API!)\n",
"\n",
"Tests:\n",

View File

@ -13,16 +13,16 @@
"|---------------|---------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
"| name | str | Must be unique within a set of tools provided to an LLM or agent. |\n",
"| description | str | Describes what the tool does. Used as context by the LLM or agent. |\n",
"| args_schema | langchain.pydantic_v1.BaseModel | Optional but recommended, and required if using callback handlers. It can be used to provide more information (e.g., few-shot examples) or validation for expected parameters. |\n",
"| args_schema | pydantic.BaseModel | Optional but recommended, and required if using callback handlers. It can be used to provide more information (e.g., few-shot examples) or validation for expected parameters. |\n",
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
"\n",
"LangChain supports the creation of tools from:\n",
"\n",
"1. Functions;\n",
"2. LangChain [Runnables](/docs/concepts#runnable-interface);\n",
"3. By sub-classing from [BaseTool](https://python.langchain.com/v0.2/api_reference/core/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
"3. By sub-classing from [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
"\n",
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://python.langchain.com/v0.2/api_reference/core/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://python.langchain.com/v0.2/api_reference/core/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
"\n",
"In this guide we provide an overview of these methods.\n",
"\n",
@ -48,7 +48,14 @@
"cell_type": "code",
"execution_count": 1,
"id": "cc7005cd-072f-4d37-8453-6297468e5192",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:52.645451Z",
"iopub.status.busy": "2024-09-10T20:25:52.645081Z",
"iopub.status.idle": "2024-09-10T20:25:53.030958Z",
"shell.execute_reply": "2024-09-10T20:25:53.030669Z"
}
},
"outputs": [
{
"name": "stdout",
@ -88,7 +95,14 @@
"cell_type": "code",
"execution_count": 2,
"id": "0c0991db-b997-4611-be37-4346e660506b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.032544Z",
"iopub.status.busy": "2024-09-10T20:25:53.032420Z",
"iopub.status.idle": "2024-09-10T20:25:53.035349Z",
"shell.execute_reply": "2024-09-10T20:25:53.035123Z"
}
},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
@ -112,22 +126,29 @@
"cell_type": "code",
"execution_count": 3,
"id": "5626423f-053e-4a66-adca-1d794d835397",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.036658Z",
"iopub.status.busy": "2024-09-10T20:25:53.036574Z",
"iopub.status.idle": "2024-09-10T20:25:53.041154Z",
"shell.execute_reply": "2024-09-10T20:25:53.040964Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'multiply_by_maxSchema',\n",
" 'description': 'Multiply a by the maximum of b.',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A',\n",
" 'description': 'scale factor',\n",
"{'description': 'Multiply a by the maximum of b.',\n",
" 'properties': {'a': {'description': 'scale factor',\n",
" 'title': 'A',\n",
" 'type': 'string'},\n",
" 'b': {'title': 'B',\n",
" 'description': 'list of ints over which to take maximum',\n",
" 'type': 'array',\n",
" 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
" 'b': {'description': 'list of ints over which to take maximum',\n",
" 'items': {'type': 'integer'},\n",
" 'title': 'B',\n",
" 'type': 'array'}},\n",
" 'required': ['a', 'b'],\n",
" 'title': 'multiply_by_maxSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 3,
@ -163,7 +184,14 @@
"cell_type": "code",
"execution_count": 4,
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.042516Z",
"iopub.status.busy": "2024-09-10T20:25:53.042427Z",
"iopub.status.idle": "2024-09-10T20:25:53.045217Z",
"shell.execute_reply": "2024-09-10T20:25:53.045010Z"
}
},
"outputs": [
{
"name": "stdout",
@ -171,13 +199,13 @@
"text": [
"multiplication-tool\n",
"Multiply two numbers.\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n",
"True\n"
]
}
],
"source": [
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
@ -218,19 +246,26 @@
"cell_type": "code",
"execution_count": 5,
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.046526Z",
"iopub.status.busy": "2024-09-10T20:25:53.046456Z",
"iopub.status.idle": "2024-09-10T20:25:53.050045Z",
"shell.execute_reply": "2024-09-10T20:25:53.049836Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'fooSchema',\n",
" 'description': 'The foo.',\n",
" 'type': 'object',\n",
" 'properties': {'bar': {'title': 'Bar',\n",
" 'description': 'The bar.',\n",
"{'description': 'The foo.',\n",
" 'properties': {'bar': {'description': 'The bar.',\n",
" 'title': 'Bar',\n",
" 'type': 'string'},\n",
" 'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
" 'required': ['bar', 'baz']}"
" 'baz': {'description': 'The baz.', 'title': 'Baz', 'type': 'integer'}},\n",
" 'required': ['bar', 'baz'],\n",
" 'title': 'fooSchema',\n",
" 'type': 'object'}"
]
},
"execution_count": 5,
@ -259,7 +294,7 @@
"metadata": {},
"source": [
":::{.callout-caution}\n",
"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://python.langchain.com/v0.2/api_reference/core/tools/langchain_core.tools.tool.html) for detail and examples.\n",
"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.tool.html) for detail and examples.\n",
":::"
]
},
@ -277,7 +312,14 @@
"cell_type": "code",
"execution_count": 6,
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.051302Z",
"iopub.status.busy": "2024-09-10T20:25:53.051218Z",
"iopub.status.idle": "2024-09-10T20:25:53.059704Z",
"shell.execute_reply": "2024-09-10T20:25:53.059490Z"
}
},
"outputs": [
{
"name": "stdout",
@ -320,7 +362,14 @@
"cell_type": "code",
"execution_count": 7,
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.060971Z",
"iopub.status.busy": "2024-09-10T20:25:53.060883Z",
"iopub.status.idle": "2024-09-10T20:25:53.064615Z",
"shell.execute_reply": "2024-09-10T20:25:53.064408Z"
}
},
"outputs": [
{
"name": "stdout",
@ -329,7 +378,7 @@
"6\n",
"Calculator\n",
"multiply numbers\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n"
]
}
],
@ -366,24 +415,39 @@
"source": [
"## Creating tools from Runnables\n",
"\n",
"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
"\n",
"Example usage:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.065797Z",
"iopub.status.busy": "2024-09-10T20:25:53.065733Z",
"iopub.status.idle": "2024-09-10T20:25:53.130458Z",
"shell.execute_reply": "2024-09-10T20:25:53.130229Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/4j/2rz3865x6qg07tx43146py8h0000gn/T/ipykernel_95770/2548361071.py:14: LangChainBetaWarning: This API is in beta and may change in the future.\n",
" as_tool = chain.as_tool(\n"
]
},
{
"data": {
"text/plain": [
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
]
},
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@ -428,19 +492,26 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"id": "1dad8f8e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.131904Z",
"iopub.status.busy": "2024-09-10T20:25:53.131803Z",
"iopub.status.idle": "2024-09-10T20:25:53.136797Z",
"shell.execute_reply": "2024-09-10T20:25:53.136563Z"
}
},
"outputs": [],
"source": [
"from typing import Optional, Type\n",
"\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
"from pydantic import BaseModel\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
@ -448,9 +519,11 @@
" b: int = Field(description=\"second number\")\n",
"\n",
"\n",
"# Note: It's important that every field has type hints. BaseTool is a\n",
"# Pydantic class and not having type hints can lead to unexpected behavior.\n",
"class CustomCalculatorTool(BaseTool):\n",
" name = \"Calculator\"\n",
" description = \"useful for when you need to answer questions about math\"\n",
" name: str = \"Calculator\"\n",
" description: str = \"useful for when you need to answer questions about math\"\n",
" args_schema: Type[BaseModel] = CalculatorInput\n",
" return_direct: bool = True\n",
"\n",
@ -477,9 +550,16 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 10,
"id": "bb551c33",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.138074Z",
"iopub.status.busy": "2024-09-10T20:25:53.138007Z",
"iopub.status.idle": "2024-09-10T20:25:53.141360Z",
"shell.execute_reply": "2024-09-10T20:25:53.141158Z"
}
},
"outputs": [
{
"name": "stdout",
@ -487,7 +567,7 @@
"text": [
"Calculator\n",
"useful for when you need to answer questions about math\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"{'a': {'description': 'first number', 'title': 'A', 'type': 'integer'}, 'b': {'description': 'second number', 'title': 'B', 'type': 'integer'}}\n",
"True\n",
"6\n",
"6\n"
@ -512,7 +592,7 @@
"source": [
"## How to create async tools\n",
"\n",
"LangChain Tools implement the [Runnable interface 🏃](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html).\n",
"LangChain Tools implement the [Runnable interface 🏃](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html).\n",
"\n",
"All Runnables expose the `invoke` and `ainvoke` methods (as well as other methods like `batch`, `abatch`, `astream` etc).\n",
"\n",
@ -528,9 +608,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 11,
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.142587Z",
"iopub.status.busy": "2024-09-10T20:25:53.142504Z",
"iopub.status.idle": "2024-09-10T20:25:53.147205Z",
"shell.execute_reply": "2024-09-10T20:25:53.146995Z"
}
},
"outputs": [
{
"name": "stdout",
@ -560,9 +647,16 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 12,
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.148383Z",
"iopub.status.busy": "2024-09-10T20:25:53.148307Z",
"iopub.status.idle": "2024-09-10T20:25:53.152684Z",
"shell.execute_reply": "2024-09-10T20:25:53.152486Z"
}
},
"outputs": [
{
"name": "stdout",
@ -605,9 +699,16 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 13,
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.153849Z",
"iopub.status.busy": "2024-09-10T20:25:53.153773Z",
"iopub.status.idle": "2024-09-10T20:25:53.158312Z",
"shell.execute_reply": "2024-09-10T20:25:53.158130Z"
}
},
"outputs": [
{
"name": "stdout",
@ -650,9 +751,16 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 14,
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.159440Z",
"iopub.status.busy": "2024-09-10T20:25:53.159364Z",
"iopub.status.idle": "2024-09-10T20:25:53.160922Z",
"shell.execute_reply": "2024-09-10T20:25:53.160712Z"
}
},
"outputs": [],
"source": [
"from langchain_core.tools import ToolException\n",
@ -673,9 +781,16 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 15,
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.162046Z",
"iopub.status.busy": "2024-09-10T20:25:53.161968Z",
"iopub.status.idle": "2024-09-10T20:25:53.165236Z",
"shell.execute_reply": "2024-09-10T20:25:53.165052Z"
}
},
"outputs": [
{
"data": {
@ -683,7 +798,7 @@
"'Error: There is no city by the name of foobar.'"
]
},
"execution_count": 16,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@ -707,9 +822,16 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 16,
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.166372Z",
"iopub.status.busy": "2024-09-10T20:25:53.166294Z",
"iopub.status.idle": "2024-09-10T20:25:53.169739Z",
"shell.execute_reply": "2024-09-10T20:25:53.169553Z"
}
},
"outputs": [
{
"data": {
@ -717,7 +839,7 @@
"\"There is no such city, but it's probably above 0K there!\""
]
},
"execution_count": 17,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@ -741,9 +863,16 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 17,
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.170937Z",
"iopub.status.busy": "2024-09-10T20:25:53.170859Z",
"iopub.status.idle": "2024-09-10T20:25:53.174498Z",
"shell.execute_reply": "2024-09-10T20:25:53.174304Z"
}
},
"outputs": [
{
"data": {
@ -751,7 +880,7 @@
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
]
},
"execution_count": 18,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@ -778,7 +907,7 @@
"\n",
"Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
"\n",
"The Tool and [ToolMessage](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
"The Tool and [ToolMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
"\n",
":::info Requires ``langchain-core >= 0.2.19``\n",
"\n",
@ -791,9 +920,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 18,
"id": "14905425-0334-43a0-9de9-5bcf622ede0e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.175683Z",
"iopub.status.busy": "2024-09-10T20:25:53.175605Z",
"iopub.status.idle": "2024-09-10T20:25:53.178798Z",
"shell.execute_reply": "2024-09-10T20:25:53.178601Z"
}
},
"outputs": [],
"source": [
"import random\n",
@ -820,9 +956,16 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 19,
"id": "0f2e1528-404b-46e6-b87c-f0957c4b9217",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.179881Z",
"iopub.status.busy": "2024-09-10T20:25:53.179807Z",
"iopub.status.idle": "2024-09-10T20:25:53.182100Z",
"shell.execute_reply": "2024-09-10T20:25:53.181940Z"
}
},
"outputs": [
{
"data": {
@ -830,7 +973,7 @@
"'Successfully generated array of 10 random ints in [0, 9].'"
]
},
"execution_count": 9,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@ -849,17 +992,24 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 20,
"id": "cc197777-26eb-46b3-a83b-c2ce116c6311",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.183238Z",
"iopub.status.busy": "2024-09-10T20:25:53.183170Z",
"iopub.status.idle": "2024-09-10T20:25:53.185752Z",
"shell.execute_reply": "2024-09-10T20:25:53.185567Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[1, 4, 2, 5, 3, 9, 0, 4, 7, 7])"
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[4, 8, 2, 4, 1, 0, 9, 5, 8, 1])"
]
},
"execution_count": 3,
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@ -885,9 +1035,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 21,
"id": "fe1a09d1-378b-4b91-bb5e-0697c3d7eb92",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.186884Z",
"iopub.status.busy": "2024-09-10T20:25:53.186803Z",
"iopub.status.idle": "2024-09-10T20:25:53.190718Z",
"shell.execute_reply": "2024-09-10T20:25:53.190494Z"
}
},
"outputs": [],
"source": [
"from langchain_core.tools import BaseTool\n",
@ -917,17 +1074,24 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 22,
"id": "8c3d16f6-1c4a-48ab-b05a-38547c592e79",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:25:53.191872Z",
"iopub.status.busy": "2024-09-10T20:25:53.191794Z",
"iopub.status.idle": "2024-09-10T20:25:53.194396Z",
"shell.execute_reply": "2024-09-10T20:25:53.194184Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.4277, 0.7578, 2.4871])"
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.5566, 0.5134, 2.7914])"
]
},
"execution_count": 8,
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
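For orientation, a minimal sketch of the content/artifact split this guide describes (assumes `langchain-core >= 0.2.19`; tool name, argument values, and the tool-call ID are illustrative):

```python
import random
from typing import List, Tuple

from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:
    """Generate size random ints in the range [min, max]."""
    array = [random.randint(min, max) for _ in range(size)]
    return f"Successfully generated array of {size} random ints in [{min}, {max}].", array


# Invoking with a ToolCall dict returns a ToolMessage: the model-facing summary
# lives in .content, while the raw list is kept in .artifact for downstream use.
msg = generate_random_ints.invoke(
    {
        "name": "generate_random_ints",
        "args": {"min": 0, "max": 9, "size": 10},
        "id": "123",
        "type": "tool_call",
    }
)
print(msg.content)   # summary string shown to the model
print(msg.artifact)  # raw list of ints
```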

View File

@ -9,7 +9,7 @@
"\n",
"A [comma-separated values (CSV)](https://en.wikipedia.org/wiki/Comma-separated_values) file is a delimited text file that uses a comma to separate values. Each line of the file is a data record. Each record consists of one or more fields, separated by commas.\n",
"\n",
"LangChain implements a [CSV Loader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) that will load CSV files into a sequence of [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Each row of the CSV file is translated to one document."
"LangChain implements a [CSV Loader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.csv_loader.CSVLoader.html) that will load CSV files into a sequence of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Each row of the CSV file is translated to one document."
]
},
{
@ -88,7 +88,7 @@
"source": [
"## Specify a column to identify the document source\n",
"\n",
"The `\"source\"` key on [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) metadata can be set using a column of the CSV. Use the `source_column` argument to specify a source for the document created from each row. Otherwise `file_path` will be used as the source for all documents created from the CSV file.\n",
"The `\"source\"` key on [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) metadata can be set using a column of the CSV. Use the `source_column` argument to specify a source for the document created from each row. Otherwise `file_path` will be used as the source for all documents created from the CSV file.\n",
"\n",
"This is useful when using documents loaded from CSV files for chains that answer questions using sources."
]
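A minimal sketch of the `source_column` behavior described above (the file path and column name are hypothetical):

```python
from langchain_community.document_loaders.csv_loader import CSVLoader

loader = CSVLoader(
    file_path="./example_data/mlb_teams_2012.csv",  # hypothetical file
    source_column="Team",
)
docs = loader.load()
# Each row's "source" metadata now comes from the "Team" column
# instead of defaulting to the file path.
print(docs[0].metadata["source"])
```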

View File

@ -7,7 +7,7 @@
"source": [
"# How to load documents from a directory\n",
"\n",
"LangChain's [DirectoryLoader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html) implements functionality for reading files from disk into LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Here we demonstrate:\n",
"LangChain's [DirectoryLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.directory.DirectoryLoader.html) implements functionality for reading files from disk into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects. Here we demonstrate:\n",
"\n",
"- How to load from a filesystem, including use of wildcard patterns;\n",
"- How to use multithreading for file I/O;\n",
@ -134,7 +134,7 @@
"metadata": {},
"source": [
"## Change loader class\n",
"By default this uses the `UnstructuredLoader` class. To customize the loader, specify the loader class in the `loader_cls` kwarg. Below we show an example using [TextLoader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.text.TextLoader.html):"
"By default this uses the `UnstructuredLoader` class. To customize the loader, specify the loader class in the `loader_cls` kwarg. Below we show an example using [TextLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.text.TextLoader.html):"
]
},
{
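A minimal sketch of swapping in a different loader class, as described above (the directory and glob pattern are illustrative):

```python
from langchain_community.document_loaders import DirectoryLoader, TextLoader

# Use TextLoader in place of the default UnstructuredLoader for .md files.
loader = DirectoryLoader("../docs", glob="**/*.md", loader_cls=TextLoader)
docs = loader.load()
print(len(docs))
```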

View File

@ -9,7 +9,7 @@
"\n",
"The HyperText Markup Language or [HTML](https://en.wikipedia.org/wiki/HTML) is the standard markup language for documents designed to be displayed in a web browser.\n",
"\n",
"This covers how to load `HTML` documents into a LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
"This covers how to load `HTML` documents into a LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
"\n",
"Parsing HTML files often requires specialized tools. Here we demonstrate parsing via [Unstructured](https://unstructured-io.github.io/unstructured/) and [BeautifulSoup4](https://beautiful-soup-4.readthedocs.io/en/latest/), which can be installed via pip. Head over to the integrations page to find integrations with additional services, such as [Azure AI Document Intelligence](/docs/integrations/document_loaders/azure_document_intelligence) or [FireCrawl](/docs/integrations/document_loaders/firecrawl).\n",
"\n",

View File

@ -4,8 +4,8 @@
[JSON Lines](https://jsonlines.org/) is a file format where each line is a valid JSON value.
LangChain implements a [JSONLoader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html)
to convert JSON and JSONL data into LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
LangChain implements a [JSONLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.json_loader.JSONLoader.html)
to convert JSON and JSONL data into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
objects. It uses a specified [jq schema](https://en.wikipedia.org/wiki/Jq_(programming_language)) to parse the JSON files, allowing for the extraction of specific fields into the content
and metadata of the LangChain Document.
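A minimal sketch of the jq-schema extraction described above (assumes the `jq` Python package and a hypothetical chat export with a `messages` array):

```python
from langchain_community.document_loaders import JSONLoader

loader = JSONLoader(
    file_path="./example_data/chat.json",  # hypothetical file
    jq_schema=".messages[].content",       # each match becomes one Document's page_content
    text_content=False,
)
docs = loader.load()
```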

View File

@ -9,14 +9,14 @@
"\n",
"[Markdown](https://en.wikipedia.org/wiki/Markdown) is a lightweight markup language for creating formatted text using a plain-text editor.\n",
"\n",
"Here we cover how to load `Markdown` documents into LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
"Here we cover how to load `Markdown` documents into LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects that we can use downstream.\n",
"\n",
"We will cover:\n",
"\n",
"- Basic usage;\n",
"- Parsing of Markdown into elements such as titles, list items, and text.\n",
"\n",
"LangChain implements an [UnstructuredMarkdownLoader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) object which requires the [Unstructured](https://unstructured-io.github.io/unstructured/) package. First we install it:"
"LangChain implements an [UnstructuredMarkdownLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.markdown.UnstructuredMarkdownLoader.html) object which requires the [Unstructured](https://unstructured-io.github.io/unstructured/) package. First we install it:"
]
},
{
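A minimal sketch of the loader described above (requires the `unstructured` package; the path is illustrative):

```python
from langchain_community.document_loaders import UnstructuredMarkdownLoader

loader = UnstructuredMarkdownLoader("./example_data/README.md")  # hypothetical file
docs = loader.load()
```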

View File

@ -3,7 +3,7 @@
The [Microsoft Office](https://www.office.com/) suite of productivity software includes Microsoft Word, Microsoft Excel, Microsoft PowerPoint, Microsoft Outlook, and Microsoft OneNote. It is available for Microsoft Windows and macOS operating systems. It is also available on Android and iOS.
This covers how to load commonly used file formats including `DOCX`, `XLSX` and `PPTX` documents into a LangChain
[Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
[Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document)
object that we can use downstream.
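A minimal sketch for Word documents (requires the `unstructured` package; the path is hypothetical, and analogous loaders exist for Excel and PowerPoint):

```python
from langchain_community.document_loaders import UnstructuredWordDocumentLoader

loader = UnstructuredWordDocumentLoader("example_data/fake.docx")  # hypothetical file
docs = loader.load()
```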

View File

@ -9,7 +9,7 @@
"\n",
"[Portable Document Format (PDF)](https://en.wikipedia.org/wiki/PDF), standardized as ISO 32000, is a file format developed by Adobe in 1992 to present documents, including text formatting and images, in a manner independent of application software, hardware, and operating systems.\n",
"\n",
"This guide covers how to load `PDF` documents into the LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) format that we use downstream.\n",
"This guide covers how to load `PDF` documents into the LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) format that we use downstream.\n",
"\n",
"LangChain integrates with a host of PDF parsers. Some are simple and relatively low-level; others will support OCR and image-processing, or perform advanced document layout analysis. The right choice will depend on your application. Below we enumerate the possibilities.\n",
"\n",
@ -90,7 +90,8 @@
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
]
},
{
@ -182,7 +183,7 @@
"source": [
"## Using other PDF loaders\n",
"\n",
"For a list of other PDF loaders to use, please see [this table](https://python.langchain.com/v0.2/docs/integrations/document_loaders/#pdfs)"
"For a list of other PDF loaders to use, please see [this table](https://python.langchain.com/docs/integrations/document_loaders/#pdfs)"
]
}
],
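As a quick sketch, one of the simpler parser integrations mentioned above (the file path is illustrative):

```python
from langchain_community.document_loaders import PyPDFLoader

loader = PyPDFLoader("example_data/layout-parser-paper.pdf")
pages = loader.load()  # one Document per page, with page-number metadata
```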

View File

@ -6,7 +6,7 @@
"source": [
"# How to combine results from multiple retrievers\n",
"\n",
"The [EnsembleRetriever](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://python.langchain.com/v0.2/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
"The [EnsembleRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://python.langchain.com/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n",
"\n",
"By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm. \n",
"\n",
@ -14,7 +14,7 @@
"\n",
"## Basic usage\n",
"\n",
"Below we demonstrate ensembling of a [BM25Retriever](https://python.langchain.com/v0.2/api_reference/community/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://python.langchain.com/v0.2/api_reference/community/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)."
"Below we demonstrate ensembling of a [BM25Retriever](https://python.langchain.com/api_reference/community/retrievers/langchain_community.retrievers.bm25.BM25Retriever.html) with a retriever derived from the [FAISS vector store](https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.faiss.FAISS.html)."
]
},
{
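A minimal sketch of the BM25 + FAISS ensemble described above (assumes `rank_bm25` and `faiss-cpu` are installed; the toy documents are illustrative):

```python
from langchain.retrievers import EnsembleRetriever
from langchain_community.retrievers import BM25Retriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

docs = [
    Document(page_content="I like apples"),
    Document(page_content="I like oranges"),
    Document(page_content="Apples and oranges are fruits"),
]
bm25_retriever = BM25Retriever.from_documents(docs)
bm25_retriever.k = 2
faiss_retriever = FAISS.from_documents(docs, OpenAIEmbeddings()).as_retriever(
    search_kwargs={"k": 2}
)

# Results from both retrievers are re-ranked with Reciprocal Rank Fusion.
ensemble = EnsembleRetriever(
    retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
print(ensemble.invoke("apples"))
```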

View File

@ -16,11 +16,11 @@
"also with JSON more or prompt based techniques.\n",
":::\n",
"\n",
"LangChain implements a [tool-call attribute](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.tool_calls) on messages from LLMs that include tool calls. See our [how-to guide on tool calling](/docs/how_to/tool_calling) for more detail. To build reference examples for data extraction, we build a chat history containing a sequence of: \n",
"LangChain implements a [tool-call attribute](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.tool_calls) on messages from LLMs that include tool calls. See our [how-to guide on tool calling](/docs/how_to/tool_calling) for more detail. To build reference examples for data extraction, we build a chat history containing a sequence of: \n",
"\n",
"- [HumanMessage](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.human.HumanMessage.html) containing example inputs;\n",
"- [AIMessage](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html) containing example tool calls;\n",
"- [ToolMessage](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.tool.ToolMessage.html) containing example tool outputs.\n",
"- [HumanMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.human.HumanMessage.html) containing example inputs;\n",
"- [AIMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html) containing example tool calls;\n",
"- [ToolMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolMessage.html) containing example tool outputs.\n",
"\n",
"LangChain adopts this convention for structuring tool calls into conversation across LLM model providers.\n",
"\n",
@ -29,9 +29,16 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "89579144-bcb3-490a-8036-86a0a6bcd56b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:41.780410Z",
"iopub.status.busy": "2024-09-10T20:26:41.780102Z",
"iopub.status.idle": "2024-09-10T20:26:42.147112Z",
"shell.execute_reply": "2024-09-10T20:26:42.146838Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
@ -67,17 +74,24 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "610c3025-ea63-4cd7-88bd-c8cbcb4d8a3f",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.148746Z",
"iopub.status.busy": "2024-09-10T20:26:42.148621Z",
"iopub.status.idle": "2024-09-10T20:26:42.162044Z",
"shell.execute_reply": "2024-09-10T20:26:42.161794Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptValue(messages=[SystemMessage(content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"), HumanMessage(content='testing 1 2 3'), HumanMessage(content='this is some text')])"
"ChatPromptValue(messages=[SystemMessage(content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\", additional_kwargs={}, response_metadata={}), HumanMessage(content='testing 1 2 3', additional_kwargs={}, response_metadata={}), HumanMessage(content='this is some text', additional_kwargs={}, response_metadata={})])"
]
},
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@ -104,15 +118,22 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "d875a49a-d2cb-4b9e-b5bf-41073bc3905c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.163477Z",
"iopub.status.busy": "2024-09-10T20:26:42.163391Z",
"iopub.status.idle": "2024-09-10T20:26:42.324449Z",
"shell.execute_reply": "2024-09-10T20:26:42.324206Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@ -162,9 +183,16 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "08356810-77ce-4e68-99d9-faa0326f2cee",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.326100Z",
"iopub.status.busy": "2024-09-10T20:26:42.326016Z",
"iopub.status.idle": "2024-09-10T20:26:42.329260Z",
"shell.execute_reply": "2024-09-10T20:26:42.329014Z"
}
},
"outputs": [],
"source": [
"import uuid\n",
@ -177,7 +205,7 @@
" SystemMessage,\n",
" ToolMessage,\n",
")\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Example(TypedDict):\n",
@ -238,9 +266,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "7f59a745-5c81-4011-a4c5-a33ec1eca7ef",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.330580Z",
"iopub.status.busy": "2024-09-10T20:26:42.330488Z",
"iopub.status.idle": "2024-09-10T20:26:42.332813Z",
"shell.execute_reply": "2024-09-10T20:26:42.332598Z"
}
},
"outputs": [],
"source": [
"examples = [\n",
@ -273,22 +308,29 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "976bb7b8-09c4-4a3e-80df-49a483705c08",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.333955Z",
"iopub.status.busy": "2024-09-10T20:26:42.333876Z",
"iopub.status.idle": "2024-09-10T20:26:42.336841Z",
"shell.execute_reply": "2024-09-10T20:26:42.336635Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"system: content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\"\n",
"human: content=\"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\"\n",
"ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': None, 'hair_color': None, 'height_in_meters': None}, 'id': 'b843ba77-4c9c-48ef-92a4-54e534f24521'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='b843ba77-4c9c-48ef-92a4-54e534f24521'\n",
"human: content='Fiona traveled far from France to Spain.'\n",
"ai: content='' tool_calls=[{'name': 'Person', 'args': {'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}, 'id': '46f00d6b-50e5-4482-9406-b07bb10340f6'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='46f00d6b-50e5-4482-9406-b07bb10340f6'\n",
"human: content='this is some text'\n"
"system: content=\"You are an expert extraction algorithm. Only extract relevant information from the text. If you do not know the value of an attribute asked to extract, return null for the attribute's value.\" additional_kwargs={} response_metadata={}\n",
"human: content=\"The ocean is vast and blue. It's more than 20,000 feet deep. There are many fish in it.\" additional_kwargs={} response_metadata={}\n",
"ai: content='' additional_kwargs={} response_metadata={} tool_calls=[{'name': 'Data', 'args': {'people': []}, 'id': '240159b1-1405-4107-a07c-3c6b91b3d5b7', 'type': 'tool_call'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='240159b1-1405-4107-a07c-3c6b91b3d5b7'\n",
"human: content='Fiona traveled far from France to Spain.' additional_kwargs={} response_metadata={}\n",
"ai: content='' additional_kwargs={} response_metadata={} tool_calls=[{'name': 'Data', 'args': {'people': [{'name': 'Fiona', 'hair_color': None, 'height_in_meters': None}]}, 'id': '3fc521e4-d1d2-4c20-bf40-e3d72f1068da', 'type': 'tool_call'}]\n",
"tool: content='You have correctly called this tool.' tool_call_id='3fc521e4-d1d2-4c20-bf40-e3d72f1068da'\n",
"human: content='this is some text' additional_kwargs={} response_metadata={}\n"
]
}
],
@ -320,9 +362,16 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "df2e1ee1-69e8-4c4d-b349-95f2e320317b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.338001Z",
"iopub.status.busy": "2024-09-10T20:26:42.337915Z",
"iopub.status.idle": "2024-09-10T20:26:42.349121Z",
"shell.execute_reply": "2024-09-10T20:26:42.348908Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@ -343,9 +392,16 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "dbfea43d-769b-42e9-a76f-ce722f7d6f93",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.350335Z",
"iopub.status.busy": "2024-09-10T20:26:42.350264Z",
"iopub.status.idle": "2024-09-10T20:26:42.424894Z",
"shell.execute_reply": "2024-09-10T20:26:42.424623Z"
}
},
"outputs": [],
"source": [
"runnable = prompt | llm.with_structured_output(\n",
@ -367,18 +423,49 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"id": "66545cab-af2a-40a4-9dc9-b4110458b7d3",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:42.426258Z",
"iopub.status.busy": "2024-09-10T20:26:42.426187Z",
"iopub.status.idle": "2024-09-10T20:26:46.151633Z",
"shell.execute_reply": "2024-09-10T20:26:46.150690Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[]\n",
"people=[Person(name='earth', hair_color='null', height_in_meters='null')]\n",
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
}
@ -401,18 +488,49 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 10,
"id": "1c09d805-ec16-4123-aef9-6a5b59499b5c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:46.155346Z",
"iopub.status.busy": "2024-09-10T20:26:46.155110Z",
"iopub.status.idle": "2024-09-10T20:26:51.810359Z",
"shell.execute_reply": "2024-09-10T20:26:51.809636Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n",
"people=[]\n",
"people=[]\n",
"people=[]\n",
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"people=[]\n"
]
}
@ -435,9 +553,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 11,
"id": "a9b7a762-1b75-4f9f-b9d9-6732dd05802c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:26:51.813309Z",
"iopub.status.busy": "2024-09-10T20:26:51.813150Z",
"iopub.status.idle": "2024-09-10T20:26:53.474153Z",
"shell.execute_reply": "2024-09-10T20:26:53.473522Z"
}
},
"outputs": [
{
"data": {
@ -445,7 +570,7 @@
"Data(people=[Person(name='Harrison', hair_color='black', height_in_meters=None)])"
]
},
"execution_count": 12,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@ -476,7 +601,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -23,16 +23,56 @@
"id": "57969139-ad0a-487e-97d8-cb30e2af9742",
"metadata": {},
"source": [
"## Set up\n",
"## Setup\n",
"\n",
"We need some example data! Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html)."
"First we'll install the dependencies needed for this guide:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "84460db2-36e1-4037-bfa6-2a11883c2ba5",
"id": "a3b4d838-5be4-4207-8a4a-9ef5624c48f2",
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:19.850767Z",
"iopub.status.busy": "2024-09-10T20:35:19.850427Z",
"iopub.status.idle": "2024-09-10T20:35:21.432233Z",
"shell.execute_reply": "2024-09-10T20:35:21.431606Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-community lxml faiss-cpu langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "ac000b03-33fc-414f-8f2c-3850df621a35",
"metadata": {},
"source": [
"Now we need some example data! Let's download an article about [cars from wikipedia](https://en.wikipedia.org/wiki/Car) and load it as a LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "84460db2-36e1-4037-bfa6-2a11883c2ba5",
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:21.434882Z",
"iopub.status.busy": "2024-09-10T20:35:21.434571Z",
"iopub.status.idle": "2024-09-10T20:35:22.214545Z",
"shell.execute_reply": "2024-09-10T20:35:22.214253Z"
}
},
"outputs": [],
"source": [
"import re\n",
@ -55,15 +95,22 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 3,
"id": "fcb6917b-123d-4630-a0ce-ed8b293d482d",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.216143Z",
"iopub.status.busy": "2024-09-10T20:35:22.216039Z",
"iopub.status.idle": "2024-09-10T20:35:22.218117Z",
"shell.execute_reply": "2024-09-10T20:35:22.217854Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"79174\n"
"80427\n"
]
}
],
@ -87,13 +134,20 @@
"cell_type": "code",
"execution_count": 4,
"id": "a3b288ed-87a6-4af0-aac8-20921dc370d4",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.219468Z",
"iopub.status.busy": "2024-09-10T20:35:22.219395Z",
"iopub.status.idle": "2024-09-10T20:35:22.340594Z",
"shell.execute_reply": "2024-09-10T20:35:22.340319Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class KeyDevelopment(BaseModel):\n",
@ -156,7 +210,14 @@
"cell_type": "code",
"execution_count": 5,
"id": "109f4f05-d0ff-431d-93d9-8f5aa34979a6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.342277Z",
"iopub.status.busy": "2024-09-10T20:35:22.342171Z",
"iopub.status.idle": "2024-09-10T20:35:22.532302Z",
"shell.execute_reply": "2024-09-10T20:35:22.532034Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@ -171,7 +232,14 @@
"cell_type": "code",
"execution_count": 6,
"id": "aa4ae224-6d3d-4fe2-b210-7db19a9fe580",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.533795Z",
"iopub.status.busy": "2024-09-10T20:35:22.533708Z",
"iopub.status.idle": "2024-09-10T20:35:22.610573Z",
"shell.execute_reply": "2024-09-10T20:35:22.610307Z"
}
},
"outputs": [],
"source": [
"extractor = prompt | llm.with_structured_output(\n",
@ -194,7 +262,14 @@
"cell_type": "code",
"execution_count": 7,
"id": "27b8a373-14b3-45ea-8bf5-9749122ad927",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.612123Z",
"iopub.status.busy": "2024-09-10T20:35:22.612052Z",
"iopub.status.idle": "2024-09-10T20:35:22.753493Z",
"shell.execute_reply": "2024-09-10T20:35:22.753179Z"
}
},
"outputs": [],
"source": [
"from langchain_text_splitters import TokenTextSplitter\n",
@ -214,7 +289,7 @@
"id": "5b43d7e0-3c85-4d97-86c7-e8c984b60b0a",
"metadata": {},
"source": [
"Use [batch](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) functionality to run the extraction in **parallel** across each chunk! \n",
"Use [batch](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) functionality to run the extraction in **parallel** across each chunk! \n",
"\n",
":::{.callout-tip}\n",
"You can often use .batch() to parallelize the extractions! `.batch` uses a threadpool under the hood to help you parallelize workloads.\n",
@ -227,7 +302,14 @@
"cell_type": "code",
"execution_count": 8,
"id": "6ba766b5-8d6c-48e6-8d69-f391a66b65d2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:22.755067Z",
"iopub.status.busy": "2024-09-10T20:35:22.754987Z",
"iopub.status.idle": "2024-09-10T20:35:36.691130Z",
"shell.execute_reply": "2024-09-10T20:35:36.690500Z"
}
},
"outputs": [],
"source": [
"# Limit just to the first 3 chunks\n",
@ -254,21 +336,27 @@
"cell_type": "code",
"execution_count": 9,
"id": "c3f77470-ce6c-477f-8957-650913218632",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:36.694799Z",
"iopub.status.busy": "2024-09-10T20:35:36.694458Z",
"iopub.status.idle": "2024-09-10T20:35:36.701416Z",
"shell.execute_reply": "2024-09-10T20:35:36.700993Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[KeyDevelopment(year=1966, description='The Toyota Corolla began production, becoming the best-selling series of automobile in history.', evidence='The Toyota Corolla, which has been in production since 1966, is the best-selling series of automobile in history.'),\n",
" KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first steam-powered road vehicle.', evidence='The French inventor Nicolas-Joseph Cugnot built the first steam-powered road vehicle in 1769.'),\n",
" KeyDevelopment(year=1808, description='François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile.', evidence='the Swiss inventor François Isaac de Rivaz designed and constructed the first internal combustion-powered automobile in 1808.'),\n",
" KeyDevelopment(year=1886, description='Carl Benz patented his Benz Patent-Motorwagen, inventing the modern car.', evidence='The modern car—a practical, marketable automobile for everyday use—was invented in 1886, when the German inventor Carl Benz patented his Benz Patent-Motorwagen.'),\n",
" KeyDevelopment(year=1908, description='Ford Model T, one of the first cars affordable by the masses, began production.', evidence='One of the first cars affordable by the masses was the Ford Model T, begun in 1908, an American car manufactured by the Ford Motor Company.'),\n",
" KeyDevelopment(year=1888, description=\"Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.\", evidence=\"In August 1888, Bertha Benz, the wife of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention.\"),\n",
"[KeyDevelopment(year=1769, description='Nicolas-Joseph Cugnot built the first full-scale, self-propelled mechanical vehicle, a steam-powered tricycle.', evidence='Nicolas-Joseph Cugnot is widely credited with building the first full-scale, self-propelled mechanical vehicle in about 1769; he created a steam-powered tricycle.'),\n",
" KeyDevelopment(year=1807, description=\"Nicéphore Niépce and his brother Claude created what was probably the world's first internal combustion engine.\", evidence=\"In 1807, Nicéphore Niépce and his brother Claude created what was probably the world's first internal combustion engine (which they called a Pyréolophore), but installed it in a boat on the river Saone in France.\"),\n",
" KeyDevelopment(year=1886, description='Carl Benz patented the Benz Patent-Motorwagen, marking the birth of the modern car.', evidence='In November 1881, French inventor Gustave Trouvé demonstrated a three-wheeled car powered by electricity at the International Exposition of Electricity. Although several other German engineers (including Gottlieb Daimler, Wilhelm Maybach, and Siegfried Marcus) were working on cars at about the same time, the year 1886 is regarded as the birth year of the modern car—a practical, marketable automobile for everyday use—when the German Carl Benz patented his Benz Patent-Motorwagen; he is generally acknowledged as the inventor of the car.'),\n",
" KeyDevelopment(year=1886, description='Carl Benz began promotion of his vehicle, marking the introduction of the first commercially available automobile.', evidence='Benz began promotion of the vehicle on 3 July 1886.'),\n",
" KeyDevelopment(year=1888, description=\"Bertha Benz undertook the first road trip by car to prove the road-worthiness of her husband's invention.\", evidence=\"In August 1888, Bertha Benz, the wife and business partner of Carl Benz, undertook the first road trip by car, to prove the road-worthiness of her husband's invention.\"),\n",
" KeyDevelopment(year=1896, description='Benz designed and patented the first internal-combustion flat engine, called boxermotor.', evidence='In 1896, Benz designed and patented the first internal-combustion flat engine, called boxermotor.'),\n",
" KeyDevelopment(year=1897, description='Nesselsdorfer Wagenbau produced the Präsident automobil, one of the first factory-made cars in the world.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'),\n",
" KeyDevelopment(year=1890, description='Daimler Motoren Gesellschaft (DMG) was founded by Daimler and Maybach in Cannstatt.', evidence='Daimler and Maybach founded Daimler Motoren Gesellschaft (DMG) in Cannstatt in 1890.'),\n",
" KeyDevelopment(year=1891, description='Auguste Doriot and Louis Rigoulot completed the longest trip by a petrol-driven vehicle with a Daimler powered Peugeot Type 3.', evidence='In 1891, Auguste Doriot and his Peugeot colleague Louis Rigoulot completed the longest trip by a petrol-driven vehicle when their self-designed and built Daimler powered Peugeot Type 3 completed 2,100 kilometres (1,300 mi) from Valentigney to Paris and Brest and back again.')]"
" KeyDevelopment(year=1897, description='The first motor car in central Europe and one of the first factory-made cars in the world, the Präsident automobil, was produced by Nesselsdorfer Wagenbau.', evidence='The first motor car in central Europe and one of the first factory-made cars in the world, was produced by Czech company Nesselsdorfer Wagenbau (later renamed to Tatra) in 1897, the Präsident automobil.'),\n",
" KeyDevelopment(year=1901, description='Ransom Olds started large-scale, production-line manufacturing of affordable cars at his Oldsmobile factory in Lansing, Michigan.', evidence='Large-scale, production-line manufacturing of affordable cars was started by Ransom Olds in 1901 at his Oldsmobile factory in Lansing, Michigan.'),\n",
" KeyDevelopment(year=1913, description=\"Henry Ford introduced the world's first moving assembly line for cars at the Highland Park Ford Plant.\", evidence=\"This concept was greatly expanded by Henry Ford, beginning in 1913 with the world's first moving assembly line for cars at the Highland Park Ford Plant.\")]"
]
},
"execution_count": 9,
@ -315,7 +403,14 @@
"cell_type": "code",
"execution_count": 10,
"id": "aaf37c82-625b-4fa1-8e88-73303f08ac16",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:36.703897Z",
"iopub.status.busy": "2024-09-10T20:35:36.703718Z",
"iopub.status.idle": "2024-09-10T20:35:38.451523Z",
"shell.execute_reply": "2024-09-10T20:35:38.450925Z"
}
},
"outputs": [],
"source": [
"from langchain_community.vectorstores import FAISS\n",
@ -344,7 +439,14 @@
"cell_type": "code",
"execution_count": 11,
"id": "47aad00b-7013-4f7f-a1b0-02ef269093bf",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:38.455094Z",
"iopub.status.busy": "2024-09-10T20:35:38.454851Z",
"iopub.status.idle": "2024-09-10T20:35:38.458315Z",
"shell.execute_reply": "2024-09-10T20:35:38.457940Z"
}
},
"outputs": [],
"source": [
"rag_extractor = {\n",
@ -356,7 +458,14 @@
"cell_type": "code",
"execution_count": 12,
"id": "68f2de01-0cd8-456e-a959-db236189d41b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:38.460115Z",
"iopub.status.busy": "2024-09-10T20:35:38.459949Z",
"iopub.status.idle": "2024-09-10T20:35:43.195532Z",
"shell.execute_reply": "2024-09-10T20:35:43.194254Z"
}
},
"outputs": [],
"source": [
"results = rag_extractor.invoke(\"Key developments associated with cars\")"
@ -366,15 +475,21 @@
"cell_type": "code",
"execution_count": 13,
"id": "1788e2d6-77bb-417f-827c-eb96c035164e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:43.200497Z",
"iopub.status.busy": "2024-09-10T20:35:43.200037Z",
"iopub.status.idle": "2024-09-10T20:35:43.206773Z",
"shell.execute_reply": "2024-09-10T20:35:43.205426Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"year=1869 description='Mary Ward became one of the first documented car fatalities in Parsonstown, Ireland.' evidence='Mary Ward became one of the first documented car fatalities in 1869 in Parsonstown, Ireland,'\n",
"year=1899 description=\"Henry Bliss one of the US's first pedestrian car casualties in New York City.\" evidence=\"Henry Bliss one of the US's first pedestrian car casualties in 1899 in New York City.\"\n",
"year=2030 description='All fossil fuel vehicles will be banned in Amsterdam.' evidence='all fossil fuel vehicles will be banned in Amsterdam from 2030.'\n"
"year=2006 description='Car-sharing services in the US experienced double-digit growth in revenue and membership.' evidence='in the US, some car-sharing services have experienced double-digit growth in revenue and membership growth between 2006 and 2007.'\n",
"year=2020 description='56 million cars were manufactured worldwide, with China producing the most.' evidence='In 2020, there were 56 million cars manufactured worldwide, down from 67 million the previous year. The automotive industry in China produces by far the most (20 million in 2020).'\n"
]
}
],
@ -416,7 +531,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -27,9 +27,16 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "25487939-8713-4ec7-b774-e4a761ac8298",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.442501Z",
"iopub.status.busy": "2024-09-10T20:35:44.442044Z",
"iopub.status.idle": "2024-09-10T20:35:44.872217Z",
"shell.execute_reply": "2024-09-10T20:35:44.871897Z"
}
},
"outputs": [],
"source": [
"# | output: false\n",
@ -62,16 +69,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"id": "497eb023-c043-443d-ac62-2d4ea85fe1b0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.873979Z",
"iopub.status.busy": "2024-09-10T20:35:44.873840Z",
"iopub.status.idle": "2024-09-10T20:35:44.878966Z",
"shell.execute_reply": "2024-09-10T20:35:44.878718Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from pydantic import BaseModel, Field, validator\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@ -114,9 +128,16 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "20b99ffb-a114-49a9-a7be-154c525f8ada",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.880355Z",
"iopub.status.busy": "2024-09-10T20:35:44.880277Z",
"iopub.status.idle": "2024-09-10T20:35:44.881834Z",
"shell.execute_reply": "2024-09-10T20:35:44.881601Z"
}
},
"outputs": [],
"source": [
"query = \"Anna is 23 years old and she is 6 feet tall\""
@ -124,9 +145,16 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "4f3a66ce-de19-4571-9e54-67504ae3fba7",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.883138Z",
"iopub.status.busy": "2024-09-10T20:35:44.883049Z",
"iopub.status.idle": "2024-09-10T20:35:44.885139Z",
"shell.execute_reply": "2024-09-10T20:35:44.884801Z"
}
},
"outputs": [
{
"name": "stdout",
@ -140,7 +168,7 @@
"\n",
"Here is the output schema:\n",
"```\n",
"{\"description\": \"Identifying information about all people in a text.\", \"properties\": {\"people\": {\"title\": \"People\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/Person\"}}}, \"required\": [\"people\"], \"definitions\": {\"Person\": {\"title\": \"Person\", \"description\": \"Information about a person.\", \"type\": \"object\", \"properties\": {\"name\": {\"title\": \"Name\", \"description\": \"The name of the person\", \"type\": \"string\"}, \"height_in_meters\": {\"title\": \"Height In Meters\", \"description\": \"The height of the person expressed in meters.\", \"type\": \"number\"}}, \"required\": [\"name\", \"height_in_meters\"]}}}\n",
"{\"$defs\": {\"Person\": {\"description\": \"Information about a person.\", \"properties\": {\"name\": {\"description\": \"The name of the person\", \"title\": \"Name\", \"type\": \"string\"}, \"height_in_meters\": {\"description\": \"The height of the person expressed in meters.\", \"title\": \"Height In Meters\", \"type\": \"number\"}}, \"required\": [\"name\", \"height_in_meters\"], \"title\": \"Person\", \"type\": \"object\"}}, \"description\": \"Identifying information about all people in a text.\", \"properties\": {\"people\": {\"items\": {\"$ref\": \"#/$defs/Person\"}, \"title\": \"People\", \"type\": \"array\"}}, \"required\": [\"people\"]}\n",
"```\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
@ -160,9 +188,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "7e0041eb-37dc-4384-9fe3-6dd8c356371e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:44.886765Z",
"iopub.status.busy": "2024-09-10T20:35:44.886675Z",
"iopub.status.idle": "2024-09-10T20:35:46.835960Z",
"shell.execute_reply": "2024-09-10T20:35:46.835282Z"
}
},
"outputs": [
{
"data": {
@ -170,7 +205,7 @@
"People(people=[Person(name='Anna', height_in_meters=1.83)])"
]
},
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@ -202,16 +237,23 @@
"\n",
"If desired, it's easy to create a custom prompt and parser with `LangChain` and `LCEL`.\n",
"\n",
"To create a custom parser, define a function to parse the output from the model (typically an [AIMessage](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html)) into an object of your choice.\n",
"To create a custom parser, define a function to parse the output from the model (typically an [AIMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html)) into an object of your choice.\n",
"\n",
"See below for a simple implementation of a JSON parser."
]
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "b1f11912-c1bb-4a2a-a482-79bf3996961f",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.839577Z",
"iopub.status.busy": "2024-09-10T20:35:46.839233Z",
"iopub.status.idle": "2024-09-10T20:35:46.849663Z",
"shell.execute_reply": "2024-09-10T20:35:46.849177Z"
}
},
"outputs": [],
"source": [
"import json\n",
@ -221,7 +263,7 @@
"from langchain_anthropic.chat_models import ChatAnthropic\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from pydantic import BaseModel, Field, validator\n",
"\n",
"\n",
"class Person(BaseModel):\n",
@ -279,16 +321,23 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "9260d5e8-3b6c-4639-9f3b-fb2f90239e4b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.851870Z",
"iopub.status.busy": "2024-09-10T20:35:46.851698Z",
"iopub.status.idle": "2024-09-10T20:35:46.854786Z",
"shell.execute_reply": "2024-09-10T20:35:46.854424Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"System: Answer the user query. Output your answer as JSON that matches the given schema: ```json\n",
"{'title': 'People', 'description': 'Identifying information about all people in a text.', 'type': 'object', 'properties': {'people': {'title': 'People', 'type': 'array', 'items': {'$ref': '#/definitions/Person'}}}, 'required': ['people'], 'definitions': {'Person': {'title': 'Person', 'description': 'Information about a person.', 'type': 'object', 'properties': {'name': {'title': 'Name', 'description': 'The name of the person', 'type': 'string'}, 'height_in_meters': {'title': 'Height In Meters', 'description': 'The height of the person expressed in meters.', 'type': 'number'}}, 'required': ['name', 'height_in_meters']}}}\n",
"{'$defs': {'Person': {'description': 'Information about a person.', 'properties': {'name': {'description': 'The name of the person', 'title': 'Name', 'type': 'string'}, 'height_in_meters': {'description': 'The height of the person expressed in meters.', 'title': 'Height In Meters', 'type': 'number'}}, 'required': ['name', 'height_in_meters'], 'title': 'Person', 'type': 'object'}}, 'description': 'Identifying information about all people in a text.', 'properties': {'people': {'items': {'$ref': '#/$defs/Person'}, 'title': 'People', 'type': 'array'}}, 'required': ['people'], 'title': 'People', 'type': 'object'}\n",
"```. Make sure to wrap the answer in ```json and ``` tags\n",
"Human: Anna is 23 years old and she is 6 feet tall\n"
]
@ -301,17 +350,32 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "c523301d-ae0e-45e3-b195-7fd28c67a5c4",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:35:46.856945Z",
"iopub.status.busy": "2024-09-10T20:35:46.856769Z",
"iopub.status.idle": "2024-09-10T20:35:48.373728Z",
"shell.execute_reply": "2024-09-10T20:35:48.373079Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/bagatur/langchain/.venv/lib/python3.11/site-packages/pydantic/_internal/_fields.py:201: UserWarning: Field name \"schema\" in \"PromptInput\" shadows an attribute in parent \"BaseModel\"\n",
" warnings.warn(\n"
]
},
{
"data": {
"text/plain": [
"[{'people': [{'name': 'Anna', 'height_in_meters': 1.83}]}]"
]
},
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@ -349,7 +413,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -90,7 +90,7 @@
"outputs": [],
"source": [
"# Note that we set max_retries = 0 to avoid retrying on RateLimits, etc\n",
"openai_llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", max_retries=0)\n",
"openai_llm = ChatOpenAI(model=\"gpt-4o-mini\", max_retries=0)\n",
"anthropic_llm = ChatAnthropic(model=\"claude-3-haiku-20240307\")\n",
"llm = openai_llm.with_fallbacks([anthropic_llm])"
]
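A short usage sketch for the fallback chain built above: because `max_retries=0`, a rate-limited OpenAI call fails immediately and the same input is routed to the Anthropic model.

```python
# A sketch, assuming the `llm` with fallbacks defined above.
response = llm.invoke("Why did the chicken cross the road?")
print(response.content)
```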

View File

@ -29,7 +29,7 @@
"\n",
"In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n",
"\n",
"A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://python.langchain.com/v0.2/api_reference/core/example_selectors/langchain_core.example_selectors.base.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n",
"A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://python.langchain.com/api_reference/core/example_selectors/langchain_core.example_selectors.base.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n",
"\n",
"This guide will cover few-shotting with string prompt templates. For a guide on few-shotting with chat messages for chat models, see [here](/docs/how_to/few_shot_examples_chat/).\n",
"\n",
@ -160,7 +160,7 @@
"source": [
"### Pass the examples and formatter to `FewShotPromptTemplate`\n",
"\n",
"Finally, create a [`FewShotPromptTemplate`](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) object. This object takes in the few-shot examples and the formatter for the few-shot examples. When this `FewShotPromptTemplate` is formatted, it formats the passed examples using the `example_prompt`, then and adds them to the final prompt before `suffix`:"
"Finally, create a [`FewShotPromptTemplate`](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.few_shot.FewShotPromptTemplate.html) object. This object takes in the few-shot examples and the formatter for the few-shot examples. When this `FewShotPromptTemplate` is formatted, it formats the passed examples using the `example_prompt`, then and adds them to the final prompt before `suffix`:"
]
},
{
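A minimal sketch of the `FewShotPromptTemplate` construction described above (the two examples are illustrative; the guide itself uses a larger set):

```python
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate

examples = [
    {"question": "What is 2 + 2?", "answer": "4"},
    {"question": "What is 3 + 3?", "answer": "6"},
]
example_prompt = PromptTemplate.from_template("Question: {question}\n{answer}")

prompt = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    suffix="Question: {input}",
    input_variables=["input"],
)
print(prompt.format(input="What is 4 + 4?"))
```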
@ -251,7 +251,7 @@
"source": [
"## Using an example selector\n",
"\n",
"We will reuse the example set and the formatter from the previous section. However, instead of feeding the examples directly into the `FewShotPromptTemplate` object, we will feed them into an implementation of `ExampleSelector` called [`SemanticSimilarityExampleSelector`](https://python.langchain.com/v0.2/api_reference/core/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html) instance. This class selects few-shot examples from the initial set based on their similarity to the input. It uses an embedding model to compute the similarity between the input and the few-shot examples, as well as a vector store to perform the nearest neighbor search.\n",
"We will reuse the example set and the formatter from the previous section. However, instead of feeding the examples directly into the `FewShotPromptTemplate` object, we will feed them into an implementation of `ExampleSelector` called [`SemanticSimilarityExampleSelector`](https://python.langchain.com/api_reference/core/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html) instance. This class selects few-shot examples from the initial set based on their similarity to the input. It uses an embedding model to compute the similarity between the input and the few-shot examples, as well as a vector store to perform the nearest neighbor search.\n",
"\n",
"To show what it looks like, let's initialize an instance and call it in isolation:"
]
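A minimal sketch of initializing the selector in isolation, as the prose above suggests (assumes the `examples` list from this guide; FAISS stands in for whichever vector store you prefer):

```python
from langchain_community.vectorstores import FAISS
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_openai import OpenAIEmbeddings

example_selector = SemanticSimilarityExampleSelector.from_examples(
    examples,            # the example set defined earlier in the guide
    OpenAIEmbeddings(),  # embedding model used to measure similarity
    FAISS,               # vector store class for the nearest-neighbor search
    k=1,
)
selected = example_selector.select_examples({"question": "horse"})
```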

View File

@ -29,7 +29,7 @@
"\n",
"This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n",
"\n",
"There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate.html?highlight=fewshot#langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate) as a flexible starting point, and you can modify or replace them as you see fit.\n",
"There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate.html?highlight=fewshot#langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate) as a flexible starting point, and you can modify or replace them as you see fit.\n",
"\n",
"The goal of few-shot prompt templates are to dynamically select examples based on an input, and then format the examples in a final prompt to provide for the model.\n",
"\n",
@ -49,7 +49,7 @@
"\n",
"The basic components of the template are:\n",
"- `examples`: A list of dictionary examples to include in the final prompt.\n",
"- `example_prompt`: converts each example into 1 or more messages through its [`format_messages`](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html?highlight=format_messages#langchain_core.prompts.chat.ChatPromptTemplate.format_messages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
"- `example_prompt`: converts each example into 1 or more messages through its [`format_messages`](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html?highlight=format_messages#langchain_core.prompts.chat.ChatPromptTemplate.format_messages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
"\n",
"Below is a simple demonstration. First, define the examples you'd like to include. Let's give the LLM an unfamiliar mathematical operator, denoted by the \"🦜\" emoji:"
]
@ -66,7 +66,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@ -86,7 +87,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='The expression \"2 🦜 9\" is not a standard mathematical operation or equation. It appears to be a combination of the number 2 and the parrot emoji 🦜 followed by the number 9. It does not have a specific mathematical meaning.', response_metadata={'token_usage': {'completion_tokens': 54, 'prompt_tokens': 17, 'total_tokens': 71}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-aad12dda-5c47-4a1e-9949-6fe94e03242a-0', usage_metadata={'input_tokens': 17, 'output_tokens': 54, 'total_tokens': 71})"
"AIMessage(content='The expression \"2 🦜 9\" is not a standard mathematical operation or equation. It appears to be a combination of the number 2 and the parrot emoji 🦜 followed by the number 9. It does not have a specific mathematical meaning.', response_metadata={'token_usage': {'completion_tokens': 54, 'prompt_tokens': 17, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-aad12dda-5c47-4a1e-9949-6fe94e03242a-0', usage_metadata={'input_tokens': 17, 'output_tokens': 54, 'total_tokens': 71})"
]
},
"execution_count": 4,
@ -97,7 +98,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"model = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.0)\n",
"\n",
"model.invoke(\"What is 2 🦜 9?\")"
]
@ -212,7 +213,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='11', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5ec4e051-262f-408e-ad00-3f2ebeb561c3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
"AIMessage(content='11', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5ec4e051-262f-408e-ad00-3f2ebeb561c3-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
]
},
"execution_count": 8,
@ -239,8 +240,8 @@
"\n",
"Sometimes you may want to select only a few examples from your overall set to show based on the input. For this, you can replace the `examples` passed into `FewShotChatMessagePromptTemplate` with an `example_selector`. The other components remain the same as above! Our dynamic few-shot prompt template would look like:\n",
"\n",
"- `example_selector`: responsible for selecting few-shot examples (and the order in which they are returned) for a given input. These implement the [BaseExampleSelector](https://python.langchain.com/v0.2/api_reference/core/example_selectors/langchain_core.example_selectors.base.BaseExampleSelector.html?highlight=baseexampleselector#langchain_core.example_selectors.base.BaseExampleSelector) interface. A common example is the vectorstore-backed [SemanticSimilarityExampleSelector](https://python.langchain.com/v0.2/api_reference/core/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html?highlight=semanticsimilarityexampleselector#langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector)\n",
"- `example_prompt`: convert each example into 1 or more messages through its [`format_messages`](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html?highlight=chatprompttemplate#langchain_core.prompts.chat.ChatPromptTemplate.format_messages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
"- `example_selector`: responsible for selecting few-shot examples (and the order in which they are returned) for a given input. These implement the [BaseExampleSelector](https://python.langchain.com/api_reference/core/example_selectors/langchain_core.example_selectors.base.BaseExampleSelector.html?highlight=baseexampleselector#langchain_core.example_selectors.base.BaseExampleSelector) interface. A common example is the vectorstore-backed [SemanticSimilarityExampleSelector](https://python.langchain.com/api_reference/core/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html?highlight=semanticsimilarityexampleselector#langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector)\n",
"- `example_prompt`: convert each example into 1 or more messages through its [`format_messages`](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html?highlight=chatprompttemplate#langchain_core.prompts.chat.ChatPromptTemplate.format_messages) method. A common example would be to convert each example into one human message and one AI message response, or a human message followed by a function call message.\n",
"\n",
"These once again can be composed with other messages and chat templates to assemble your final prompt.\n",
"\n",
@ -418,7 +419,7 @@
{
"data": {
"text/plain": [
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d1863e5e-17cd-4e9d-bf7a-b9f118747a65-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
"AIMessage(content='6', response_metadata={'token_usage': {'completion_tokens': 1, 'prompt_tokens': 60, 'total_tokens': 61}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-d1863e5e-17cd-4e9d-bf7a-b9f118747a65-0', usage_metadata={'input_tokens': 60, 'output_tokens': 1, 'total_tokens': 61})"
]
},
"execution_count": 13,
@ -427,7 +428,7 @@
}
],
"source": [
"chain = final_prompt | ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0.0)\n",
"chain = final_prompt | ChatOpenAI(model=\"gpt-4o-mini\", temperature=0.0)\n",
"\n",
"chain.invoke({\"input\": \"What's 3 🦜 3?\"})"
]

View File

@ -175,7 +175,7 @@
"source": [
"## API reference\n",
"\n",
"For a complete description of all arguments head to the API reference: https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.utils.filter_messages.html"
"For a complete description of all arguments head to the API reference: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.filter_messages.html"
]
}
],

View File

@ -88,7 +88,7 @@
"## Passing tools to LLMs\n",
"\n",
"Chat models supporting tool calling features implement a `.bind_tools` method, which \n",
"receives a list of LangChain [tool objects](https://python.langchain.com/v0.2/api_reference/core/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
"receives a list of LangChain [tool objects](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
"and binds them to the chat model in its expected format. Subsequent invocations of the \n",
"chat model will include tool schemas in its calls to the LLM.\n",
"\n",
@ -136,7 +136,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Note that the docstrings here are crucial, as they will be passed along\n",
@ -191,7 +191,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
]
},
{
@ -212,9 +212,9 @@
"## Tool calls\n",
"\n",
"If tool calls are included in a LLM response, they are attached to the corresponding \n",
"[message](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage) \n",
"or [message chunk](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"as a list of [tool call](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.tool.ToolCall.html#langchain_core.messages.tool.ToolCall) \n",
"[message](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage) \n",
"or [message chunk](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"as a list of [tool call](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolCall.html#langchain_core.messages.tool.ToolCall) \n",
"objects in the `.tool_calls` attribute. A `ToolCall` is a typed dict that includes a \n",
"tool name, dict of argument values, and (optionally) an identifier. Messages with no \n",
"tool calls default to an empty list for this attribute.\n",
@ -258,7 +258,7 @@
"The `.tool_calls` attribute should contain valid tool calls. Note that on occasion, \n",
"model providers may output malformed tool calls (e.g., arguments that are not \n",
"valid JSON). When parsing fails in these cases, instances \n",
"of [InvalidToolCall](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.tool.InvalidToolCall.html#langchain_core.messages.tool.InvalidToolCall) \n",
"of [InvalidToolCall](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.InvalidToolCall.html#langchain_core.messages.tool.InvalidToolCall) \n",
"are populated in the `.invalid_tool_calls` attribute. An `InvalidToolCall` can have \n",
"a name, string arguments, identifier, and error message.\n",
"\n",
@ -298,8 +298,8 @@
"### Streaming\n",
"\n",
"When tools are called in a streaming context, \n",
"[message chunks](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"will be populated with [tool call chunk](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
"[message chunks](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"will be populated with [tool call chunk](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n",
"objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n",
"optional string fields for the tool `name`, `args`, and `id`, and includes an optional \n",
"integer field `index` that can be used to join chunks together. Fields are optional \n",
@ -307,7 +307,7 @@
"that includes a substring of the arguments may have null values for the tool name and id).\n",
"\n",
"Because message chunks inherit from their parent message class, an \n",
"[AIMessageChunk](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"[AIMessageChunk](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n",
"with tool call chunks will also include `.tool_calls` and `.invalid_tool_calls` fields. \n",
"These fields are parsed best-effort from the message's tool call chunks.\n",
"\n",
@ -696,7 +696,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -26,7 +26,7 @@
"\n",
":::\n",
"\n",
"You can use arbitrary functions as [Runnables](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable). This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html).\n",
"You can use arbitrary functions as [Runnables](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable). This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html).\n",
"\n",
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single dict input and unpacks it into multiple arguments.\n",
"\n",
@ -54,7 +54,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@ -210,7 +211,7 @@
"\n",
"## Passing run metadata\n",
"\n",
"Runnable lambdas can optionally accept a [RunnableConfig](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html#langchain_core.runnables.config.RunnableConfig) parameter, which they can use to pass callbacks, tags, and other configuration information to nested runs."
"Runnable lambdas can optionally accept a [RunnableConfig](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html#langchain_core.runnables.config.RunnableConfig) parameter, which they can use to pass callbacks, tags, and other configuration information to nested runs."
]
},
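A sketch of a lambda that accepts a config and forwards it to a nested run, so callbacks and tags propagate (names illustrative):

```python
from langchain_core.runnables import RunnableConfig, RunnableLambda


def do_work(x: str, config: RunnableConfig) -> str:
    # Tags and callbacks from the config propagate to nested runs invoked here.
    return inner.invoke(x, config=config)


inner = RunnableLambda(lambda s: s.upper())
outer = RunnableLambda(do_work)

print(outer.invoke("hello", config={"tags": ["my-tag"]}))  # HELLO
```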
{
@ -303,7 +304,7 @@
"## Streaming\n",
"\n",
":::{.callout-note}\n",
"[RunnableLambda](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html) is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate on chunks of inputs and yield chunks of outputs), use [RunnableGenerator](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableGenerator.html) instead as in the example below.\n",
"[RunnableLambda](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html) is best suited for code that does not need to support streaming. If you need to support streaming (i.e., be able to operate on chunks of inputs and yield chunks of outputs), use [RunnableGenerator](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableGenerator.html) instead as in the example below.\n",
":::\n",
"\n",
"You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a chain.\n",

View File

@ -163,8 +163,8 @@
"from typing import List, Optional\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",

View File

@ -347,7 +347,7 @@
"\n",
"If we have enough examples, we may want to only include the most relevant ones in the prompt, either because they don't fit in the model's context window or because the long tail of examples distracts the model. And specifically, given any input we want to include the examples most relevant to that input.\n",
"\n",
"We can do just this using an ExampleSelector. In this case we'll use a [SemanticSimilarityExampleSelector](https://python.langchain.com/v0.2/api_reference/core/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html), which will store the examples in the vector database of our choosing. At runtime it will perform a similarity search between the input and our examples, and return the most semantically similar ones: "
"We can do just this using an ExampleSelector. In this case we'll use a [SemanticSimilarityExampleSelector](https://python.langchain.com/api_reference/core/example_selectors/langchain_core.example_selectors.semantic_similarity.SemanticSimilarityExampleSelector.html), which will store the examples in the vector database of our choosing. At runtime it will perform a similarity search between the input and our examples, and return the most semantically similar ones: "
]
},
{

View File

@ -177,14 +177,15 @@
"source": [
"from typing import Optional, Type\n",
"\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"# Import things that are needed generically\n",
"from pydantic import BaseModel, Field\n",
"\n",
"description_query = \"\"\"\n",
"MATCH (m:Movie|Person)\n",
"WHERE m.title CONTAINS $candidate OR m.name CONTAINS $candidate\n",
@ -226,14 +227,15 @@
"source": [
"from typing import Optional, Type\n",
"\n",
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.callbacks import (\n",
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"# Import things that are needed generically\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class InformationInput(BaseModel):\n",
" entity: str = Field(description=\"movie or a person mentioned in the question\")\n",

View File

@ -9,7 +9,7 @@ Here you'll find answers to “How do I….?” types of questions.
These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task.
For conceptual explanations see the [Conceptual guide](/docs/concepts/).
For end-to-end walkthroughs see [Tutorials](/docs/tutorials).
For comprehensive descriptions of every class and function see the [API Reference](https://python.langchain.com/v0.2/api_reference/).
For comprehensive descriptions of every class and function see the [API Reference](https://python.langchain.com/api_reference/).
## Installation
@ -27,7 +27,7 @@ This highlights functionality that is core to using LangChain.
## LangChain Expression Language (LCEL)
[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) protocol.
[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html) protocol.
[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives.

View File

@ -7,10 +7,10 @@
"source": [
"# LangChain Expression Language Cheatsheet\n",
"\n",
"This is a quick reference for all the most important LCEL primitives. For more advanced usage see the [LCEL how-to guides](/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html).\n",
"This is a quick reference for all the most important LCEL primitives. For more advanced usage see the [LCEL how-to guides](/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html).\n",
"\n",
"### Invoke a runnable\n",
"#### [Runnable.invoke()](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.invoke) / [Runnable.ainvoke()](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.ainvoke)"
"#### [Runnable.invoke()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.invoke) / [Runnable.ainvoke()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.ainvoke)"
]
},
{
@ -46,7 +46,7 @@
"metadata": {},
"source": [
"### Batch a runnable\n",
"#### [Runnable.batch()](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch) / [Runnable.abatch()](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch)"
"#### [Runnable.batch()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch) / [Runnable.abatch()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch)"
]
},
{
@ -82,7 +82,7 @@
"metadata": {},
"source": [
"### Stream a runnable\n",
"#### [Runnable.stream()](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) / [Runnable.astream()](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream)"
"#### [Runnable.stream()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) / [Runnable.astream()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream)"
]
},
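One toy runnable exercising the three primitives above (a sketch only; the function is illustrative):

```python
from langchain_core.runnables import RunnableLambda

runnable = RunnableLambda(lambda x: x * 2)

print(runnable.invoke(5))         # 10
print(runnable.batch([1, 2, 3]))  # [2, 4, 6]
for chunk in runnable.stream(4):  # a single chunk here: 8
    print(chunk)
```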
{
@ -165,7 +165,7 @@
"metadata": {},
"source": [
"### Invoke runnables in parallel\n",
"#### [RunnableParallel](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableParallel.html)"
"#### [RunnableParallel](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableParallel.html)"
]
},
{
@ -202,7 +202,7 @@
"metadata": {},
"source": [
"### Turn any function into a runnable\n",
"#### [RunnableLambda](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html)"
"#### [RunnableLambda](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableLambda.html)"
]
},
{
@ -240,7 +240,7 @@
"metadata": {},
"source": [
"### Merge input and output dicts\n",
"#### [RunnablePassthrough.assign](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)"
"#### [RunnablePassthrough.assign](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)"
]
},
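A minimal sketch of `RunnablePassthrough.assign` merging a computed key into the input dict (names illustrative):

```python
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

runnable = RunnablePassthrough.assign(total=RunnableLambda(lambda d: d["a"] + d["b"]))

# The input dict is passed through, with the computed key merged in.
print(runnable.invoke({"a": 1, "b": 2}))  # {'a': 1, 'b': 2, 'total': 3}
```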
{
@ -276,7 +276,7 @@
"metadata": {},
"source": [
"### Include input dict in output dict\n",
"#### [RunnablePassthrough](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)"
"#### [RunnablePassthrough](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html)"
]
},
{
@ -316,7 +316,7 @@
"metadata": {},
"source": [
"### Add default invocation args\n",
"#### [Runnable.bind](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind)"
"#### [Runnable.bind](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind)"
]
},
{
@ -360,7 +360,7 @@
"metadata": {},
"source": [
"### Add fallbacks\n",
"#### [Runnable.with_fallbacks](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_fallbacks)"
"#### [Runnable.with_fallbacks](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_fallbacks)"
]
},
{
@ -397,7 +397,7 @@
"metadata": {},
"source": [
"### Add retries\n",
"#### [Runnable.with_retry](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_retry)"
"#### [Runnable.with_retry](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_retry)"
]
},
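A sketch combining `with_retry` and `with_fallbacks` on a toy runnable (the flaky function is illustrative):

```python
from langchain_core.runnables import RunnableLambda

counter = {"calls": 0}


def flaky(x: int) -> int:
    counter["calls"] += 1
    if counter["calls"] < 3:
        raise ValueError("transient failure")
    return x + 1


safe = (
    RunnableLambda(flaky)
    .with_retry(stop_after_attempt=3)                # retry transient errors
    .with_fallbacks([RunnableLambda(lambda x: -1)])  # last-resort fallback
)
print(safe.invoke(41))  # 42 on the third attempt
```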
{
@ -449,7 +449,7 @@
"metadata": {},
"source": [
"### Configure runnable execution\n",
"#### [RunnableConfig](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html)"
"#### [RunnableConfig](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html)"
]
},
{
@ -487,7 +487,7 @@
"metadata": {},
"source": [
"### Add default config to runnable\n",
"#### [Runnable.with_config](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config)"
"#### [Runnable.with_config](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config)"
]
},
{
@ -526,7 +526,7 @@
"metadata": {},
"source": [
"### Make runnable attributes configurable\n",
"#### [Runnable.with_configurable_fields](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_fields)"
"#### [Runnable.with_configurable_fields](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_fields)"
]
},
{
@ -605,7 +605,7 @@
"metadata": {},
"source": [
"### Make chain components configurable\n",
"#### [Runnable.with_configurable_alternatives](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_alternatives)"
"#### [Runnable.with_configurable_alternatives](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableSerializable.html#langchain_core.runnables.base.RunnableSerializable.configurable_alternatives)"
]
},
{
@ -745,7 +745,7 @@
"metadata": {},
"source": [
"### Generate a stream of events\n",
"#### [Runnable.astream_events](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events)"
"#### [Runnable.astream_events](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events)"
]
},
{
@ -817,7 +817,7 @@
"metadata": {},
"source": [
"### Yield batched outputs as they complete\n",
"#### [Runnable.batch_as_completed](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch_as_completed) / [Runnable.abatch_as_completed](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch_as_completed)"
"#### [Runnable.batch_as_completed](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.batch_as_completed) / [Runnable.abatch_as_completed](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.abatch_as_completed)"
]
},
{
@ -858,7 +858,7 @@
"metadata": {},
"source": [
"### Return subset of output dict\n",
"#### [Runnable.pick](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.pick)"
"#### [Runnable.pick](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.pick)"
]
},
{
@ -893,7 +893,7 @@
"metadata": {},
"source": [
"### Declaratively make a batched version of a runnable\n",
"#### [Runnable.map](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.map)"
"#### [Runnable.map](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.map)"
]
},
{
@ -930,7 +930,7 @@
"metadata": {},
"source": [
"### Get a graph representation of a runnable\n",
"#### [Runnable.get_graph](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_graph)"
"#### [Runnable.get_graph](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_graph)"
]
},
{
@ -991,7 +991,7 @@
"metadata": {},
"source": [
"### Get all prompts in a chain\n",
"#### [Runnable.get_prompts](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_prompts)"
"#### [Runnable.get_prompts](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.get_prompts)"
]
},
{
@ -1071,7 +1071,7 @@
"metadata": {},
"source": [
"### Add lifecycle listeners\n",
"#### [Runnable.with_listeners](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_listeners)"
"#### [Runnable.with_listeners](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_listeners)"
]
},
{

View File

@ -25,7 +25,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"# Please manually enter OpenAI Key"
]
},

View File

@ -24,7 +24,7 @@
"\n",
"There are some API-specific callback context managers that allow you to track token usage across multiple calls. You'll need to check whether such an integration is available for your particular model.\n",
"\n",
"If such an integration is not available for your model, you can create a custom callback manager by adapting the implementation of the [OpenAI callback manager](https://python.langchain.com/v0.2/api_reference/community/callbacks/langchain_community.callbacks.openai_info.OpenAICallbackHandler.html).\n",
"If such an integration is not available for your model, you can create a custom callback manager by adapting the implementation of the [OpenAI callback manager](https://python.langchain.com/api_reference/community/callbacks/langchain_community.callbacks.openai_info.OpenAICallbackHandler.html).\n",
"\n",
"### OpenAI\n",
"\n",

View File

@ -244,7 +244,7 @@
"\n",
"* E.g., for Llama 2 7b: `ollama pull llama2` will download the most basic version of the model (e.g., smallest # parameters and 4 bit quantization)\n",
"* We can also specify a particular version from the [model list](https://github.com/jmorganca/ollama?tab=readme-ov-file#model-library), e.g., `ollama pull llama2:13b`\n",
"* See the full set of parameters on the [API reference page](https://python.langchain.com/v0.2/api_reference/community/llms/langchain_community.llms.ollama.Ollama.html)"
"* See the full set of parameters on the [API reference page](https://python.langchain.com/api_reference/community/llms/langchain_community.llms.ollama.Ollama.html)"
]
},
{
@ -280,9 +280,9 @@
"\n",
"For example, below we run inference on `llama2-13b` with 4 bit quantization downloaded from [HuggingFace](https://huggingface.co/TheBloke/Llama-2-13B-GGML/tree/main).\n",
"\n",
"As noted above, see the [API reference](https://python.langchain.com/v0.2/api_reference/langchain/llms/langchain.llms.llamacpp.LlamaCpp.html?highlight=llamacpp#langchain.llms.llamacpp.LlamaCpp) for the full set of parameters. \n",
"As noted above, see the [API reference](https://python.langchain.com/api_reference/langchain/llms/langchain.llms.llamacpp.LlamaCpp.html?highlight=llamacpp#langchain.llms.llamacpp.LlamaCpp) for the full set of parameters. \n",
"\n",
"From the [llama.cpp API reference docs](https://python.langchain.com/v0.2/api_reference/community/llms/langchain_community.llms.llamacpp.LlamaCpp.html), a few are worth commenting on:\n",
"From the [llama.cpp API reference docs](https://python.langchain.com/api_reference/community/llms/langchain_community.llms.llamacpp.LlamaCpp.html), a few are worth commenting on:\n",
"\n",
"`n_gpu_layers`: number of layers to be loaded into GPU memory\n",
"\n",
@ -416,7 +416,7 @@
"\n",
"We can use model weights downloaded from [GPT4All](/docs/integrations/llms/gpt4all) model explorer.\n",
"\n",
"Similar to what is shown above, we can run inference and use [the API reference](https://python.langchain.com/v0.2/api_reference/community/llms/langchain_community.llms.gpt4all.GPT4All.html) to set parameters of interest."
"Similar to what is shown above, we can run inference and use [the API reference](https://python.langchain.com/api_reference/community/llms/langchain_community.llms.gpt4all.GPT4All.html) to set parameters of interest."
]
},
{

View File

@ -55,7 +55,7 @@
"id": "f88ffa0d-f4a7-482c-88de-cbec501a79b1",
"metadata": {},
"source": [
"For the OpenAI API to return log probabilities we need to configure the `logprobs=True` param. Then, the logprobs are included on each output [`AIMessage`](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html) as part of the `response_metadata`:"
"For the OpenAI API to return log probabilities we need to configure the `logprobs=True` param. Then, the logprobs are included on each output [`AIMessage`](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html) as part of the `response_metadata`:"
]
},
{
@ -94,7 +94,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\").bind(logprobs=True)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\").bind(logprobs=True)\n",
"\n",
"msg = llm.invoke((\"human\", \"how are you today\"))\n",
"\n",

View File

@ -13,7 +13,7 @@
"\n",
"To mitigate the [\"lost in the middle\"](https://arxiv.org/abs/2307.03172) effect, you can re-order documents after retrieval such that the most relevant documents are positioned at extrema (e.g., the first and last pieces of context), and the least relevant documents are positioned in the middle. In some cases this can help surface the most relevant information to LLMs.\n",
"\n",
"The [LongContextReorder](https://python.langchain.com/v0.2/api_reference/community/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) document transformer implements this re-ordering procedure. Below we demonstrate an example."
"The [LongContextReorder](https://python.langchain.com/api_reference/community/document_transformers/langchain_community.document_transformers.long_context_reorder.LongContextReorder.html) document transformer implements this re-ordering procedure. Below we demonstrate an example."
]
},
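A minimal sketch of the transformer applied to an already-retrieved list (the documents are illustrative):

```python
from langchain_community.document_transformers import LongContextReorder
from langchain_core.documents import Document

docs = [Document(page_content=f"doc {i}") for i in range(6)]

# Assuming the most relevant documents come first, they are moved to the
# extrema (start and end); the least relevant end up in the middle.
reordered = LongContextReorder().transform_documents(docs)
print([d.page_content for d in reordered])
```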
{

View File

@ -17,7 +17,7 @@
"When a full paragraph or document is embedded, the embedding process considers both the overall context and the relationships between the sentences and phrases within the text. This can result in a more comprehensive vector representation that captures the broader meaning and themes of the text.\n",
"```\n",
" \n",
"As mentioned, chunking often aims to keep text with common context together. With this in mind, we might want to specifically honor the structure of the document itself. For example, a markdown file is organized by headers. Creating chunks within specific header groups is an intuitive idea. To address this challenge, we can use [MarkdownHeaderTextSplitter](https://python.langchain.com/v0.2/api_reference/text_splitters/markdown/langchain_text_splitters.markdown.MarkdownHeaderTextSplitter.html). This will split a markdown file by a specified set of headers. \n",
"As mentioned, chunking often aims to keep text with common context together. With this in mind, we might want to specifically honor the structure of the document itself. For example, a markdown file is organized by headers. Creating chunks within specific header groups is an intuitive idea. To address this challenge, we can use [MarkdownHeaderTextSplitter](https://python.langchain.com/api_reference/text_splitters/markdown/langchain_text_splitters.markdown.MarkdownHeaderTextSplitter.html). This will split a markdown file by a specified set of headers. \n",
"\n",
"For example, if we want to split this markdown:\n",
"```\n",

View File

@ -11,12 +11,30 @@
"\n",
"The `merge_message_runs` utility makes it easy to merge consecutive messages of the same type.\n",
"\n",
"### Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "198ce37f-4466-45a2-8878-d75cd01a5d23",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-core langchain-anthropic"
]
},
{
"cell_type": "markdown",
"id": "b5c3ca6e-e5b3-4151-8307-9101713a20ae",
"metadata": {},
"source": [
"## Basic usage"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 8,
"id": "1a215bbb-c05c-40b0-a6fd-d94884d517df",
"metadata": {},
"outputs": [
@ -24,11 +42,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\")\n",
"SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\", additional_kwargs={}, response_metadata={})\n",
"\n",
"HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'])\n",
"HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'], additional_kwargs={}, response_metadata={})\n",
"\n",
"AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')\n"
"AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!', additional_kwargs={}, response_metadata={})\n"
]
}
],
@ -63,38 +81,6 @@
"Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character."
]
},
{
"cell_type": "markdown",
"id": "11f7e8d3",
"metadata": {},
"source": [
"The `merge_message_runs` utility also works with messages composed together using the overloaded `+` operation:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b51855c5",
"metadata": {},
"outputs": [],
"source": [
"messages = (\n",
" SystemMessage(\"you're a good assistant.\")\n",
" + SystemMessage(\"you always respond with a joke.\")\n",
" + HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}])\n",
" + HumanMessage(\"and who is harrison chasing anyways\")\n",
" + AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" )\n",
" + AIMessage(\n",
" \"Why, he's probably chasing after the last cup of coffee in the office!\"\n",
" )\n",
")\n",
"\n",
"merged = merge_message_runs(messages)\n",
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
]
},
{
"cell_type": "markdown",
"id": "1b2eee74-71c8-4168-b968-bca580c25d18",
@ -107,23 +93,30 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 9,
"id": "6d5a0283-11f8-435b-b27b-7b18f7693592",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content=[], response_metadata={'id': 'msg_01D6R8Naum57q8qBau9vLBUX', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-ac0c465b-b54f-4b8b-9295-e5951250d653-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})"
"AIMessage(content=[], additional_kwargs={}, response_metadata={'id': 'msg_01KNGUMTuzBVfwNouLDpUMwf', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 84, 'output_tokens': 3}}, id='run-b908b198-9c24-450b-9749-9d4a8182937b-0', usage_metadata={'input_tokens': 84, 'output_tokens': 3, 'total_tokens': 87})"
]
},
"execution_count": 3,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# pip install -U langchain-anthropic\n",
"%pip install -qU langchain-anthropic\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)\n",
@ -146,19 +139,19 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 10,
"id": "460817a6-c327-429d-958e-181a8c46059c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\"),\n",
" HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways']),\n",
" AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!')]"
"[SystemMessage(content=\"you're a good assistant.\\nyou always respond with a joke.\", additional_kwargs={}, response_metadata={}),\n",
" HumanMessage(content=[{'type': 'text', 'text': \"i wonder why it's called langchain\"}, 'and who is harrison chasing anyways'], additional_kwargs={}, response_metadata={}),\n",
" AIMessage(content='Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!\\nWhy, he\\'s probably chasing after the last cup of coffee in the office!', additional_kwargs={}, response_metadata={})]"
]
},
"execution_count": 4,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -167,6 +160,53 @@
"merger.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "4178837d-b155-492d-9404-d567accc1fa0",
"metadata": {},
"source": [
"`merge_message_runs` can also be placed after a prompt:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "620530ab-ed05-4899-b984-bfa4cd738465",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='A convergent series is an infinite series whose partial sums approach a finite value as more terms are added. In other words, the sequence of partial sums has a limit.\\n\\nMore formally, an infinite series Σ an (where an are the terms of the series) is said to be convergent if the sequence of partial sums:\\n\\nS1 = a1\\nS2 = a1 + a2 \\nS3 = a1 + a2 + a3\\n...\\nSn = a1 + a2 + a3 + ... + an\\n...\\n\\nconverges to some finite number S as n goes to infinity. We write:\\n\\nlim n→∞ Sn = S\\n\\nThe finite number S is called the sum of the convergent infinite series.\\n\\nIf the sequence of partial sums does not approach any finite limit, the infinite series is said to be divergent.\\n\\nSome key properties:\\n- A series converges if and only if the sequence of its partial sums is a Cauchy sequence.\\n- Absolute/conditional convergence criteria help determine if a given series converges.\\n- Convergent series have many important applications in mathematics, physics, engineering etc.', additional_kwargs={}, response_metadata={'id': 'msg_01MfV6y2hep7ZNvDz24A36U4', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 267}}, id='run-9d925f58-021e-4bd0-94fc-f8f5e91010a4-0', usage_metadata={'input_tokens': 29, 'output_tokens': 267, 'total_tokens': 296})"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate(\n",
" [\n",
" (\"system\", \"You're great a {skill}\"),\n",
" (\"system\", \"You're also great at explaining things\"),\n",
" (\"human\", \"{query}\"),\n",
" ]\n",
")\n",
"chain = prompt | merger | llm\n",
"chain.invoke({\"skill\": \"math\", \"query\": \"what's the definition of a convergent series\"})"
]
},
{
"cell_type": "markdown",
"id": "51ba533a-43c7-4e5f-bd91-a4ec23ceeb34",
"metadata": {},
"source": [
"LangSmith Trace: https://smith.langchain.com/public/432150b6-9909-40a7-8ae7-944b7e657438/r/f4ad5fb2-4d38-42a6-b780-25f62617d53f"
]
},
{
"cell_type": "markdown",
"id": "4548d916-ce21-4dc6-8f19-eedb8003ace6",
@ -174,7 +214,7 @@
"source": [
"## API reference\n",
"\n",
"For a complete description of all arguments head to the API reference: https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.utils.merge_message_runs.html"
"For a complete description of all arguments head to the API reference: https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.merge_message_runs.html"
]
}
],
@ -194,7 +234,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -32,7 +32,7 @@
"\n",
":::\n",
"\n",
"Passing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it. Specifically, it loads previous messages in the conversation BEFORE passing it to the Runnable, and it saves the generated response as a message AFTER calling the runnable. This class also enables multiple conversations by saving each conversation with a `session_id` - it then expects a `session_id` to be passed in the config when calling the runnable, and uses that to look up the relevant conversation history.\n",
"Passing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it. Specifically, it loads previous messages in the conversation BEFORE passing it to the Runnable, and it saves the generated response as a message AFTER calling the runnable. This class also enables multiple conversations by saving each conversation with a `session_id` - it then expects a `session_id` to be passed in the config when calling the runnable, and uses that to look up the relevant conversation history.\n",
"\n",
"![index_diagram](../../static/img/message_history.png)\n",
"\n",

View File

@ -31,7 +31,7 @@
":::\n",
"\n",
"Here we focus on how to move from legacy LangChain agents to more flexible [LangGraph](https://langchain-ai.github.io/langgraph/) agents.\n",
"LangChain agents (the [AgentExecutor](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters.\n",
"LangChain agents (the [AgentExecutor](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters.\n",
"In this notebook we will show how those parameters map to the LangGraph react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) prebuilt helper method.\n",
"\n",
"#### Prerequisites\n",
@ -65,9 +65,11 @@
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\""
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API key:\\n\")"
]
},
{
@ -110,7 +112,7 @@
"id": "af002033-fe51-4d14-b47c-3e9b483c8395",
"metadata": {},
"source": [
"For the LangChain [AgentExecutor](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), we define a prompt with a placeholder for the agent's scratchpad. The agent can be invoked as follows:"
"For the LangChain [AgentExecutor](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), we define a prompt with a placeholder for the agent's scratchpad. The agent can be invoked as follows:"
]
},
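For comparison, a minimal sketch of the prebuilt LangGraph equivalent (the tool and query are illustrative):

```python
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent


@tool
def magic_function(input: int) -> int:
    """Applies a magic function to an input."""
    return input + 2


model = ChatOpenAI(model="gpt-4o-mini")
app = create_react_agent(model, [magic_function])

result = app.invoke({"messages": [("human", "what is the value of magic_function(3)?")]})
print(result["messages"][-1].content)
```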
{
@ -381,7 +383,7 @@
"source": [
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation."
"With LangChain's [AgentExecutor](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation."
]
},
{
@ -539,7 +541,7 @@
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. LangGraph supports stepwise iteration using [stream](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) "
"With LangChain's [AgentExecutor](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. LangGraph supports stepwise iteration using [stream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) "
]
},
{
@ -1017,7 +1019,7 @@
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an [early_stopping_method](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying \"Agent stopped due to iteration limit or time limit.\" (`\"force\"`) or prompt the LLM a final time to respond (`\"generate\"`)."
"With LangChain's [AgentExecutor](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an [early_stopping_method](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying \"Agent stopped due to iteration limit or time limit.\" (`\"force\"`) or prompt the LLM a final time to respond (`\"generate\"`)."
]
},
{
@ -1128,7 +1130,7 @@
"\n",
"### In LangChain\n",
"\n",
"With LangChain's [AgentExecutor](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim_intermediate_steps](https://python.langchain.com/v0.2/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function.\n",
"With LangChain's [AgentExecutor](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim_intermediate_steps](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function.\n",
"\n",
"For instance, we could trim the value so the agent only sees the most recent intermediate step."
]
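A sketch of that configuration, assuming a tool-calling agent built as elsewhere in this guide (the tool, prompt, and model are illustrative):

```python
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def magic_function(input: int) -> int:
    """Applies a magic function to an input."""
    return input + 2


prompt = ChatPromptTemplate.from_messages(
    [("human", "{input}"), ("placeholder", "{agent_scratchpad}")]
)
agent = create_tool_calling_agent(ChatOpenAI(model="gpt-4o-mini"), [magic_function], prompt)

# Keep only the most recent intermediate step in the agent's scratchpad.
agent_executor = AgentExecutor(
    agent=agent,
    tools=[magic_function],
    trim_intermediate_steps=1,
)
```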
@ -1325,7 +1327,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -9,17 +9,17 @@
"\n",
"It can often be useful to store multiple vectors per document. There are multiple use cases where this is beneficial. For example, we can embed multiple chunks of a document and associate those embeddings with the parent document, allowing retriever hits on the chunks to return the larger document.\n",
"\n",
"LangChain implements a base [MultiVectorRetriever](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n",
"LangChain implements a base [MultiVectorRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n",
"\n",
"The methods to create multiple vectors per document include:\n",
"\n",
"- Smaller chunks: split a document into smaller chunks, and embed those (this is [ParentDocumentRetriever](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html)).\n",
"- Smaller chunks: split a document into smaller chunks, and embed those (this is [ParentDocumentRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html)).\n",
"- Summary: create a summary for each document, embed that along with (or instead of) the document.\n",
"- Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document.\n",
"\n",
"Note that this also enables another method of adding embeddings - manually. This is useful because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control.\n",
"\n",
"Below we walk through an example. First we instantiate some documents. We will index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store using [OpenAI](https://python.langchain.com/v0.2/docs/integrations/text_embedding/openai/) embeddings, but any LangChain vector store or embeddings model will suffice."
"Below we walk through an example. First we instantiate some documents. We will index them in an (in-memory) [Chroma](/docs/integrations/providers/chroma/) vector store using [OpenAI](https://python.langchain.com/docs/integrations/text_embedding/openai/) embeddings, but any LangChain vector store or embeddings model will suffice."
]
},
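
As a hedged sketch of the wiring described above (the collection name and id key are illustrative choices, not requirements):

```python
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

# The vector store indexes the per-chunk embeddings; the byte store holds the
# parent documents, keyed by the id stamped into each sub-document's metadata.
vectorstore = Chroma(collection_name="full_documents", embedding_function=OpenAIEmbeddings())
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=InMemoryByteStore(),
    id_key="doc_id",
)
```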
{
@ -68,7 +68,7 @@
"source": [
"## Smaller chunks\n",
"\n",
"Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the [ParentDocumentRetriever](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html) does. Here we show what is going on under the hood.\n",
"Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the [ParentDocumentRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.parent_document_retriever.ParentDocumentRetriever.html) does. Here we show what is going on under the hood.\n",
"\n",
"We will make a distinction between the vector store, which indexes embeddings of the (sub) documents, and the document store, which houses the \"parent\" documents and associates them with an identifier."
]
@ -103,7 +103,7 @@
"id": "d4feded4-856a-4282-91c3-53aabc62e6ff",
"metadata": {},
"source": [
"We next generate the \"sub\" documents by splitting the original documents. Note that we store the document identifier in the `metadata` of the corresponding [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html) object."
"We next generate the \"sub\" documents by splitting the original documents. Note that we store the document identifier in the `metadata` of the corresponding [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) object."
]
},
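
A sketch of that splitting step, assuming `docs` and the `retriever` from the sketch above:

```python
import uuid

from langchain_text_splitters import RecursiveCharacterTextSplitter

doc_ids = [str(uuid.uuid4()) for _ in docs]
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)

sub_docs = []
for i, doc in enumerate(docs):
    for chunk in child_splitter.split_documents([doc]):
        chunk.metadata["doc_id"] = doc_ids[i]  # map each chunk back to its parent
        sub_docs.append(chunk)

retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
```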
{
@ -207,7 +207,7 @@
"id": "cdef8339-f9fa-4b3b-955f-ad9dbdf2734f",
"metadata": {},
"source": [
"The default search type the retriever performs on the vector database is a similarity search. LangChain vector stores also support searching via [Max Marginal Relevance](https://python.langchain.com/v0.2/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search). This can be controlled via the `search_type` parameter of the retriever:"
"The default search type the retriever performs on the vector database is a similarity search. LangChain vector stores also support searching via [Max Marginal Relevance](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search). This can be controlled via the `search_type` parameter of the retriever:"
]
},
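
For example, continuing the sketch above:

```python
from langchain.retrievers.multi_vector import SearchType

retriever.search_type = SearchType.mmr  # default is SearchType.similarity
retriever.invoke("some query")
```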
{
@ -244,7 +244,7 @@
"\n",
"A summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those.\n",
"\n",
"We construct a simple [chain](/docs/how_to/sequence) that will receive an input [Document](https://python.langchain.com/v0.2/api_reference/core/documents/langchain_core.documents.base.Document.html) object and generate a summary using a LLM.\n",
"We construct a simple [chain](/docs/how_to/sequence) that will receive an input [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) object and generate a summary using a LLM.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
@ -294,7 +294,7 @@
"id": "3faa9fde-1b09-4849-a815-8b2e89c30a02",
"metadata": {},
"source": [
"Note that we can [batch](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain accross documents:"
"Note that we can [batch](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain accross documents:"
]
},
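
A rough sketch of the summary chain and the batched call, assuming `llm` is a chat model chosen from the tabs above and `docs` is the document list:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

chain = (
    {"doc": lambda x: x.page_content}  # pull the raw text out of each Document
    | ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
    | llm
    | StrOutputParser()
)

summaries = chain.batch(docs, {"max_concurrency": 5})  # at most five parallel calls
```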
{
@ -440,7 +440,7 @@
"source": [
"from typing import List\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class HypotheticalQuestions(BaseModel):\n",

View File

@ -24,8 +24,8 @@
"from typing import List\n",
"\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI"
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field"
]
},
{
@ -131,7 +131,7 @@
"id": "84498e02",
"metadata": {},
"source": [
"Find out api documentation for [OutputFixingParser](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser)."
"Find out api documentation for [OutputFixingParser](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser)."
]
},
{

View File

@ -30,7 +30,7 @@
"id": "ae909b7a",
"metadata": {},
"source": [
"The [`JsonOutputParser`](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html) is one built-in option for prompting for and then parsing JSON output. While it is similar in functionality to the [`PydanticOutputParser`](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html), it also supports streaming back partial JSON objects.\n",
"The [`JsonOutputParser`](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.json.JsonOutputParser.html) is one built-in option for prompting for and then parsing JSON output. While it is similar in functionality to the [`PydanticOutputParser`](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.pydantic.PydanticOutputParser.html), it also supports streaming back partial JSON objects.\n",
"\n",
"Here's an example of how it can be used alongside [Pydantic](https://docs.pydantic.dev/) to conveniently declare the expected schema:"
]
@ -47,7 +47,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@ -71,8 +72,8 @@
"source": [
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"model = ChatOpenAI(temperature=0)\n",
"\n",

View File

@ -20,8 +20,8 @@
"from langchain.output_parsers import OutputFixingParser\n",
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI, OpenAI"
"from langchain_openai import ChatOpenAI, OpenAI\n",
"from pydantic import BaseModel, Field"
]
},
{
@ -244,7 +244,7 @@
"id": "e3a2513a",
"metadata": {},
"source": [
"Find out api documentation for [RetryOutputParser](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.retry.RetryOutputParser.html#langchain.output_parsers.retry.RetryOutputParser)."
"Find out api documentation for [RetryOutputParser](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.retry.RetryOutputParser.html#langchain.output_parsers.retry.RetryOutputParser)."
]
},
{

View File

@ -35,17 +35,17 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 1,
"id": "1594b2bf-2a6f-47bb-9a81-38930f8e606b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Joke(setup='Why did the chicken cross the road?', punchline='To get to the other side!')"
"Joke(setup='Why did the tomato turn red?', punchline='Because it saw the salad dressing!')"
]
},
"execution_count": 6,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@ -53,8 +53,8 @@
"source": [
"from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from langchain_openai import OpenAI\n",
"from pydantic import BaseModel, Field, model_validator\n",
"\n",
"model = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", temperature=0.0)\n",
"\n",
@ -65,11 +65,13 @@
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
"\n",
" # You can add custom validation logic easily with Pydantic.\n",
" @validator(\"setup\")\n",
" def question_ends_with_question_mark(cls, field):\n",
" if field[-1] != \"?\":\n",
" @model_validator(mode=\"before\")\n",
" @classmethod\n",
" def question_ends_with_question_mark(cls, values: dict) -> dict:\n",
" setup = values[\"setup\"]\n",
" if setup[-1] != \"?\":\n",
" raise ValueError(\"Badly formed question!\")\n",
" return field\n",
" return values\n",
"\n",
"\n",
"# Set up a parser + inject instructions into the prompt template.\n",
@ -239,9 +241,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-311",
"language": "python",
"name": "python3"
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
@ -253,7 +255,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -20,7 +20,7 @@
"\n",
"LLMs from different providers often have different strengths depending on the specific data they are trianed on. This also means that some may be \"better\" and more reliable at generating output in formats other than JSON.\n",
"\n",
"This guide shows you how to use the [`XMLOutputParser`](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html) to prompt models for XML output, then and parse that output into a usable format.\n",
"This guide shows you how to use the [`XMLOutputParser`](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html) to prompt models for XML output, then and parse that output into a usable format.\n",
"\n",
":::{.callout-note}\n",
"Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed XML.\n",
@ -41,7 +41,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()"
]
},
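
A sketch of the parser in use (the model name is an assumption; any sufficiently capable chat model should work):

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.output_parsers import XMLOutputParser
from langchain_core.prompts import PromptTemplate

parser = XMLOutputParser()
prompt = PromptTemplate(
    template="{query}\n{format_instructions}",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
model = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0)

chain = prompt | model | parser
chain.invoke({"query": "Generate a short filmography for Tom Hanks."})
```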
{

View File

@ -39,7 +39,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
@ -47,7 +48,7 @@
"id": "cc479f3a",
"metadata": {},
"source": [
"We use [Pydantic](https://docs.pydantic.dev) with the [`YamlOutputParser`](https://python.langchain.com/v0.2/api_reference/langchain/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser) to declare our data model and give the model more context as to what type of YAML it should generate:"
"We use [Pydantic](https://docs.pydantic.dev) with the [`YamlOutputParser`](https://python.langchain.com/api_reference/langchain/output_parsers/langchain.output_parsers.yaml.YamlOutputParser.html#langchain.output_parsers.yaml.YamlOutputParser) to declare our data model and give the model more context as to what type of YAML it should generate:"
]
},
{
@ -70,8 +71,8 @@
"source": [
"from langchain.output_parsers import YamlOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Define your desired data structure.\n",

View File

@ -26,7 +26,7 @@
"\n",
":::\n",
"\n",
"The [`RunnableParallel`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.RunnableParallel.html) primitive is essentially a dict whose values are runnables (or things that can be coerced to runnables, like functions). It runs all of its values in parallel, and each value is called with the overall input of the `RunnableParallel`. The final return value is a dict with the results of each value under its appropriate key.\n",
"The [`RunnableParallel`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableParallel.html) primitive is essentially a dict whose values are runnables (or things that can be coerced to runnables, like functions). It runs all of its values in parallel, and each value is called with the overall input of the `RunnableParallel`. The final return value is a dict with the results of each value under its appropriate key.\n",
"\n",
"## Formatting with `RunnableParallels`\n",
"\n",
@ -60,7 +60,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{

View File

@ -29,7 +29,7 @@
":::\n",
"\n",
"\n",
"When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) class allows you to do just this, and is typically is used in conjuction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n",
"When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) class allows you to do just this, and is typically is used in conjuction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n",
"\n",
"See the example below:"
]
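
A minimal sketch:

```python
from langchain_core.runnables import RunnableLambda, RunnableParallel, RunnablePassthrough

runnable = RunnableParallel(
    passed=RunnablePassthrough(),                     # forwards the input unchanged
    modified=RunnableLambda(lambda x: x["num"] + 1),  # computes something from it
)
runnable.invoke({"num": 1})  # -> {'passed': {'num': 1}, 'modified': 2}
```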
@ -46,7 +46,8 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{

View File

@ -102,7 +102,7 @@
"source": [
"A chat prompt is made up a of a list of messages. Similarly to the above example, we can concatenate chat prompt templates. Each new element is a new message in the final prompt.\n",
"\n",
"First, let's initialize the a [`ChatPromptTemplate`](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) with a [`SystemMessage`](https://python.langchain.com/v0.2/api_reference/core/messages/langchain_core.messages.system.SystemMessage.html)."
"First, let's initialize the a [`ChatPromptTemplate`](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.chat.ChatPromptTemplate.html) with a [`SystemMessage`](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.system.SystemMessage.html)."
]
},
{
@ -123,7 +123,7 @@
"metadata": {},
"source": [
"You can then easily create a pipeline combining it with other messages *or* message templates.\n",
"Use a `Message` when there is no variables to be formatted, use a `MessageTemplate` when there are variables to be formatted. You can also use just a string (note: this will automatically get inferred as a [`HumanMessagePromptTemplate`](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.chat.HumanMessagePromptTemplate.html).)"
"Use a `Message` when there is no variables to be formatted, use a `MessageTemplate` when there are variables to be formatted. You can also use just a string (note: this will automatically get inferred as a [`HumanMessagePromptTemplate`](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.chat.HumanMessagePromptTemplate.html).)"
]
},
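
A sketch of that composition; the trailing string is inferred as a `HumanMessagePromptTemplate`:

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

prompt = SystemMessage(content="You are a nice pirate")
new_prompt = (
    prompt + HumanMessage(content="hi") + AIMessage(content="what?") + "{input}"
)
new_prompt.format_messages(input="i said hi")
```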
{
@ -183,7 +183,7 @@
"id": "0a5892f9-e4d8-4b7c-b6a5-4651539b9734",
"metadata": {},
"source": [
"LangChain includes a class called [`PipelinePromptTemplate`](https://python.langchain.com/v0.2/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html), which can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts:\n",
"LangChain includes a class called [`PipelinePromptTemplate`](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html), which can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts:\n",
"\n",
"- Final prompt: The final prompt that is returned\n",
"- Pipeline prompts: A list of tuples, consisting of a string name and a prompt template. Each prompt template will be formatted and then passed to future prompt templates as a variable with the same name."

View File

@ -1,178 +1,9 @@
# How to use LangChain with different Pydantic versions
- Pydantic v2 was released in June 2023 (https://docs.pydantic.dev/2.0/blog/pydantic-v2-final/).
- v2 contains a number of breaking changes (https://docs.pydantic.dev/2.0/migration/).
- Pydantic 1 End of Life was in June 2024. LangChain will be dropping support for Pydantic 1 in the near future,
and likely migrating internally to Pydantic 2. The timeline is tentatively September. This change will be accompanied by a minor version bump in the main langchain packages to version 0.3.x.
As of the `0.3` release, LangChain uses Pydantic 2 internally.
As of `langchain>=0.0.267`, LangChain allows users to install either Pydantic V1 or V2.
Users should install Pydantic 2 and are advised to **avoid** using the `pydantic.v1` namespace of Pydantic 2 with
LangChain APIs.
Internally, LangChain continues to use the [Pydantic V1](https://docs.pydantic.dev/latest/migration/#continue-using-pydantic-v1-features) via
the v1 namespace of Pydantic 2.
Because Pydantic does not support mixing .v1 and .v2 objects, users should be aware of a number of issues
when using LangChain with Pydantic.
:::caution
While LangChain supports Pydantic V2 objects in some APIs (listed below), it's suggested that users keep using Pydantic V1 objects until LangChain 0.3 is released.
:::
## 1. Passing Pydantic objects to LangChain APIs
Most LangChain APIs for *tool usage* (see list below) have been updated to accept either Pydantic v1 or v2 objects.
* Pydantic v1 objects correspond to subclasses of `pydantic.BaseModel` if `pydantic 1` is installed or subclasses of `pydantic.v1.BaseModel` if `pydantic 2` is installed.
* Pydantic v2 objects correspond to subclasses of `pydantic.BaseModel` if `pydantic 2` is installed.

| API | Pydantic 1 | Pydantic 2 |
|----------------------------------------|------------|----------------------------------------------------------------|
| `BaseChatModel.bind_tools` | Yes | langchain-core>=0.2.23, appropriate version of partner package |
| `BaseChatModel.with_structured_output` | Yes | langchain-core>=0.2.23, appropriate version of partner package |
| `Tool.from_function` | Yes | langchain-core>=0.2.23 |
| `StructuredTool.from_function` | Yes | langchain-core>=0.2.23 |
Partner packages that accept pydantic v2 objects via `bind_tools` or `with_structured_output` APIs:

| Package Name | pydantic v1 | pydantic v2 |
|---------------------|-------------|-------------|
| langchain-mistralai | Yes | >=0.1.11 |
| langchain-anthropic | Yes | >=0.1.21 |
| langchain-robocorp | Yes | >=0.0.10 |
| langchain-openai | Yes | >=0.1.19 |
| langchain-fireworks | Yes | >=0.1.5 |
| langchain-aws | Yes | >=0.1.15 |
Additional partner packages will be updated to accept Pydantic v2 objects in the future.
If you are still seeing issues with these APIs or other APIs that accept Pydantic objects, please open an issue, and we'll
address it.
Example:
With `langchain-core<0.2.23`, use Pydantic v1 objects when passing to LangChain APIs.
```python
from langchain_openai import ChatOpenAI
from pydantic.v1 import BaseModel  # <-- Note v1 namespace


class Person(BaseModel):
    """Personal information"""

    name: str


model = ChatOpenAI()
model = model.with_structured_output(Person)
model.invoke('Bob is a person.')
```
With `langchain-core>=0.2.23`, use either Pydantic v1 or v2 objects when passing to LangChain APIs.
```python
from langchain_openai import ChatOpenAI
from pydantic import BaseModel


class Person(BaseModel):
    """Personal information"""

    name: str


model = ChatOpenAI()
model = model.with_structured_output(Person)
model.invoke('Bob is a person.')
```
## 2. Sub-classing LangChain models
Because LangChain internally uses Pydantic v1, if you are sub-classing LangChain models, you should use Pydantic v1
primitives.
**Example 1: Extending via inheritance**
**YES**
```python
from pydantic.v1 import Field, validator
from langchain_core.tools import BaseTool


class CustomTool(BaseTool):  # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @validator('x')  # v1 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1


CustomTool(
    name='custom_tool',
    description="hello",
    x=1,
)
```
Mixing Pydantic v2 primitives with Pydantic v1 primitives can raise cryptic errors.
**NO**
```python
from pydantic import Field, field_validator  # pydantic v2
from langchain_core.tools import BaseTool


class CustomTool(BaseTool):  # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @field_validator('x')  # v2 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1


CustomTool(
    name='custom_tool',
    description="hello",
    x=1,
)
```
## 3. Disable run-time validation for LangChain objects used inside Pydantic v2 models
For example:
```python
from typing import Annotated

from langchain_openai import ChatOpenAI  # <-- ChatOpenAI uses pydantic v1
from pydantic import BaseModel, SkipValidation


class Foo(BaseModel):  # <-- BaseModel is from Pydantic v2
    model: Annotated[ChatOpenAI, SkipValidation()]


Foo(model=ChatOpenAI(api_key="hello"))
```
## 4. LangServe cannot generate OpenAPI docs if running Pydantic 2
If you are using Pydantic 2, you will not be able to generate OpenAPI docs using LangServe.
If you need OpenAPI docs, your options are either to install Pydantic 1
(`pip install pydantic==1.10.17`) or to use the `APIHandler` object in LangChain
to manually create the routes for your API.
See: https://python.langchain.com/v0.2/docs/langserve/#pydantic
If you're working with prior versions of LangChain, please see the following guide
on [Pydantic compatibility](https://python.langchain.com/v0.2/docs/how_to/pydantic_compatibility).

View File

@ -102,7 +102,7 @@
"source": [
"## Chains {#chains}\n",
"\n",
"In a conversational RAG application, queries issued to the retriever should be informed by the context of the conversation. LangChain provides a [create_history_aware_retriever](https://python.langchain.com/v0.2/api_reference/langchain/chains/langchain.chains.history_aware_retriever.create_history_aware_retriever.html) constructor to simplify this. It constructs a chain that accepts keys `input` and `chat_history` as input, and has the same output schema as a retriever. `create_history_aware_retriever` requires as inputs: \n",
"In a conversational RAG application, queries issued to the retriever should be informed by the context of the conversation. LangChain provides a [create_history_aware_retriever](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.history_aware_retriever.create_history_aware_retriever.html) constructor to simplify this. It constructs a chain that accepts keys `input` and `chat_history` as input, and has the same output schema as a retriever. `create_history_aware_retriever` requires as inputs: \n",
"\n",
"1. LLM;\n",
"2. Retriever;\n",
@ -155,7 +155,7 @@
"id": "15f8ad59-19de-42e3-85a8-3ba95ee0bd43",
"metadata": {},
"source": [
"For the retriever, we will use [WebBaseLoader](https://python.langchain.com/v0.2/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load the content of a web page. Here we instantiate a `Chroma` vectorstore and then use its [.as_retriever](https://python.langchain.com/v0.2/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.as_retriever) method to build a retriever that can be incorporated into [LCEL](/docs/concepts/#langchain-expression-language) chains."
"For the retriever, we will use [WebBaseLoader](https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.web_base.WebBaseLoader.html) to load the content of a web page. Here we instantiate a `Chroma` vectorstore and then use its [.as_retriever](https://python.langchain.com/api_reference/core/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.as_retriever) method to build a retriever that can be incorporated into [LCEL](/docs/concepts/#langchain-expression-language) chains."
]
},
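
A sketch of the constructor in use, assuming `llm` and `retriever` from the surrounding setup (the system prompt is illustrative):

```python
from langchain.chains import create_history_aware_retriever
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Reformulate the latest user question so it is standalone."),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
```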
{
@ -260,9 +260,9 @@
"\n",
"Now we can build our full QA chain.\n",
"\n",
"As in the [RAG tutorial](/docs/tutorials/rag), we will use [create_stuff_documents_chain](https://python.langchain.com/v0.2/api_reference/langchain/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) to generate a `question_answer_chain`, with input keys `context`, `chat_history`, and `input`-- it accepts the retrieved context alongside the conversation history and query to generate an answer.\n",
"As in the [RAG tutorial](/docs/tutorials/rag), we will use [create_stuff_documents_chain](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) to generate a `question_answer_chain`, with input keys `context`, `chat_history`, and `input`-- it accepts the retrieved context alongside the conversation history and query to generate an answer.\n",
"\n",
"We build our final `rag_chain` with [create_retrieval_chain](https://python.langchain.com/v0.2/api_reference/langchain/chains/langchain.chains.retrieval.create_retrieval_chain.html). This chain applies the `history_aware_retriever` and `question_answer_chain` in sequence, retaining intermediate outputs such as the retrieved context for convenience. It has input keys `input` and `chat_history`, and includes `input`, `chat_history`, `context`, and `answer` in its output."
"We build our final `rag_chain` with [create_retrieval_chain](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.retrieval.create_retrieval_chain.html). This chain applies the `history_aware_retriever` and `question_answer_chain` in sequence, retaining intermediate outputs such as the retrieved context for convenience. It has input keys `input` and `chat_history`, and includes `input`, `chat_history`, `context`, and `answer` in its output."
]
},
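
Sketched out, with `qa_prompt` standing in for a hypothetical `ChatPromptTemplate` that has `context`, `chat_history`, and `input` variables:

```python
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain

question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
rag_chain.invoke({"input": "What is Task Decomposition?", "chat_history": []})
```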
{
@ -305,7 +305,7 @@
"1. An object for storing the chat history;\n",
"2. An object that wraps our chain and manages updates to the chat history.\n",
"\n",
"For these we will use [BaseChatMessageHistory](https://python.langchain.com/v0.2/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html) and [RunnableWithMessageHistory](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html). The latter is a wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n",
"For these we will use [BaseChatMessageHistory](https://python.langchain.com/api_reference/core/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html) and [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html). The latter is a wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n",
"\n",
"For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/how_to/message_history/) LCEL how-to guide.\n",
"\n",

View File

@ -19,7 +19,7 @@
"\n",
"We generally suggest using the first item of the list that works for your use-case. That is, if your model supports tool-calling, try methods 1 or 2; otherwise, or if those fail, advance down the list.\n",
"\n",
"Let's first create a simple RAG chain. To start we'll just retrieve from Wikipedia using the [WikipediaRetriever](https://python.langchain.com/v0.2/api_reference/community/retrievers/langchain_community.retrievers.wikipedia.WikipediaRetriever.html)."
"Let's first create a simple RAG chain. To start we'll just retrieve from Wikipedia using the [WikipediaRetriever](https://python.langchain.com/api_reference/community/retrievers/langchain_community.retrievers.wikipedia.WikipediaRetriever.html)."
]
},
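
For instance (parameter values are illustrative):

```python
from langchain_community.retrievers import WikipediaRetriever

retriever = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
docs = retriever.invoke("How fast are cheetahs?")
```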
{
@ -253,7 +253,7 @@
"source": [
"## Function-calling\n",
"\n",
"If your LLM of choice implements a [tool-calling](/docs/concepts#functiontool-calling) feature, you can use it to make the model specify which of the provided documents it's referencing when generating its answer. LangChain tool-calling models implement a `.with_structured_output` method which will force generation adhering to a desired schema (see for example [here](https://python.langchain.com/v0.2/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.with_structured_output)).\n",
"If your LLM of choice implements a [tool-calling](/docs/concepts#functiontool-calling) feature, you can use it to make the model specify which of the provided documents it's referencing when generating its answer. LangChain tool-calling models implement a `.with_structured_output` method which will force generation adhering to a desired schema (see for example [here](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.with_structured_output)).\n",
"\n",
"### Cite documents\n",
"\n",
@ -269,7 +269,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class CitedAnswer(BaseModel):\n",
@ -616,7 +616,7 @@
"\n",
"1. We update the formatting function to wrap the retrieved context in XML tags;\n",
"2. We do not use `.with_structured_output` (e.g., because it does not exist for a model);\n",
"3. We use [XMLOutputParser](https://python.langchain.com/v0.2/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html) in place of `StrOutputParser` to parse the answer into a dict."
"3. We use [XMLOutputParser](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html) in place of `StrOutputParser` to parse the answer into a dict."
]
},
{
@ -711,7 +711,7 @@
"source": [
"## Retrieval post-processing\n",
"\n",
"Another approach is to post-process our retrieved documents to compress the content, so that the source content is already minimal enough that we don't need the model to cite specific sources or spans. For example, we could break up each document into a sentence or two, embed those and keep only the most relevant ones. LangChain has some built-in components for this. Here we'll use a [RecursiveCharacterTextSplitter](https://python.langchain.com/v0.2/api_reference/text_splitters/text_splitter/langchain_text_splitters.RecursiveCharacterTextSplitter.html#langchain_text_splitters.RecursiveCharacterTextSplitter), which creates chunks of a sepacified size by splitting on separator substrings, and an [EmbeddingsFilter](https://python.langchain.com/v0.2/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.embeddings_filter.EmbeddingsFilter.html#langchain.retrievers.document_compressors.embeddings_filter.EmbeddingsFilter), which keeps only the texts with the most relevant embeddings.\n",
"Another approach is to post-process our retrieved documents to compress the content, so that the source content is already minimal enough that we don't need the model to cite specific sources or spans. For example, we could break up each document into a sentence or two, embed those and keep only the most relevant ones. LangChain has some built-in components for this. Here we'll use a [RecursiveCharacterTextSplitter](https://python.langchain.com/api_reference/text_splitters/text_splitter/langchain_text_splitters.RecursiveCharacterTextSplitter.html#langchain_text_splitters.RecursiveCharacterTextSplitter), which creates chunks of a sepacified size by splitting on separator substrings, and an [EmbeddingsFilter](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.document_compressors.embeddings_filter.EmbeddingsFilter.html#langchain.retrievers.document_compressors.embeddings_filter.EmbeddingsFilter), which keeps only the texts with the most relevant embeddings.\n",
"\n",
"This approach effectively swaps our original retriever with an updated one that compresses the documents. To start, we build the retriever:"
]
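
A hedged sketch of that swap (assuming `embeddings` and the original `retriever` exist; parameter values are illustrative):

```python
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import (
    DocumentCompressorPipeline,
    EmbeddingsFilter,
)
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=0)
compressor = EmbeddingsFilter(embeddings=embeddings, k=10)

compression_retriever = ContextualCompressionRetriever(
    base_compressor=DocumentCompressorPipeline(transformers=[splitter, compressor]),
    base_retriever=retriever,
)
```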

View File

@ -13,7 +13,7 @@
"\n",
"We will cover two approaches:\n",
"\n",
"1. Using the built-in [create_retrieval_chain](https://python.langchain.com/v0.2/api_reference/langchain/chains/langchain.chains.retrieval.create_retrieval_chain.html), which returns sources by default;\n",
"1. Using the built-in [create_retrieval_chain](https://python.langchain.com/api_reference/langchain/chains/langchain.chains.retrieval.create_retrieval_chain.html), which returns sources by default;\n",
"2. Using a simple [LCEL](/docs/concepts#langchain-expression-language-lcel) implementation, to show the operating principle.\n",
"\n",
"We will also show how to structure sources into the model response, such that a model can report what specific sources it used in generating its answer."

View File

@ -328,7 +328,7 @@
"id": "8b2d224d-2a82-418b-b562-01ea210b86ef",
"metadata": {},
"source": [
"More simply, we can use the [.pick](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.pick) method to select only the desired key:"
"More simply, we can use the [.pick](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.pick) method to select only the desired key:"
]
},
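
For example, assuming `rag_chain` returns a dict with an `answer` key:

```python
chain_with_answer = rag_chain.pick("answer")
chain_with_answer.invoke({"input": "What is Task Decomposition?"})
```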
{
@ -432,7 +432,7 @@
"\n",
"To stream intermediate output, we recommend use of the async `.astream_events` method. This method will stream output from all \"events\" in the chain, and can be quite verbose. We can filter using tags, event types, and other criteria, as we do here.\n",
"\n",
"Below we show a typical `.astream_events` loop, where we pass in the chain input and emit desired results. See the [API reference](https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events) and [streaming guide](/docs/how_to/streaming) for more detail."
"Below we show a typical `.astream_events` loop, where we pass in the chain input and emit desired results. See the [API reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events) and [streaming guide](/docs/how_to/streaming) for more detail."
]
},
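
A sketch of such a loop; the input key and event filter depend on the chain, and `chain` is assumed to be any runnable:

```python
import asyncio

async def stream_answer(chain, question: str) -> None:
    # Emit only chat-model token chunks from the full event stream.
    async for event in chain.astream_events({"input": question}, version="v2"):
        if event["event"] == "on_chat_model_stream":
            print(event["data"]["chunk"].content, end="|")

# asyncio.run(stream_answer(rag_chain, "What is Task Decomposition?"))
```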
{

View File

@ -24,9 +24,16 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 1,
"id": "8ca446a0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:35.834087Z",
"iopub.status.busy": "2024-09-11T02:32:35.833763Z",
"iopub.status.idle": "2024-09-11T02:32:36.588973Z",
"shell.execute_reply": "2024-09-11T02:32:36.588677Z"
}
},
"outputs": [],
"source": [
"from typing import Optional\n",
@ -40,7 +47,7 @@
")\n",
"from langchain_community.query_constructors.chroma import ChromaTranslator\n",
"from langchain_community.query_constructors.elasticsearch import ElasticsearchTranslator\n",
"from langchain_core.pydantic_v1 import BaseModel"
"from pydantic import BaseModel"
]
},
{
@ -53,9 +60,16 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 2,
"id": "64055006",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.590665Z",
"iopub.status.busy": "2024-09-11T02:32:36.590527Z",
"iopub.status.idle": "2024-09-11T02:32:36.592985Z",
"shell.execute_reply": "2024-09-11T02:32:36.592763Z"
}
},
"outputs": [],
"source": [
"class Search(BaseModel):\n",
@ -66,9 +80,16 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 3,
"id": "44eb6d98",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.594147Z",
"iopub.status.busy": "2024-09-11T02:32:36.594072Z",
"iopub.status.idle": "2024-09-11T02:32:36.595777Z",
"shell.execute_reply": "2024-09-11T02:32:36.595563Z"
}
},
"outputs": [],
"source": [
"search_query = Search(query=\"RAG\", start_year=2022, author=\"LangChain\")"
@ -76,9 +97,16 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 4,
"id": "e8ba6705",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.596902Z",
"iopub.status.busy": "2024-09-11T02:32:36.596824Z",
"iopub.status.idle": "2024-09-11T02:32:36.598805Z",
"shell.execute_reply": "2024-09-11T02:32:36.598629Z"
}
},
"outputs": [],
"source": [
"def construct_comparisons(query: Search):\n",
@ -104,9 +132,16 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 5,
"id": "6a79c9da",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.599989Z",
"iopub.status.busy": "2024-09-11T02:32:36.599909Z",
"iopub.status.idle": "2024-09-11T02:32:36.601521Z",
"shell.execute_reply": "2024-09-11T02:32:36.601306Z"
}
},
"outputs": [],
"source": [
"comparisons = construct_comparisons(search_query)"
@ -114,9 +149,16 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 6,
"id": "2d0e9689",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.602688Z",
"iopub.status.busy": "2024-09-11T02:32:36.602603Z",
"iopub.status.idle": "2024-09-11T02:32:36.604171Z",
"shell.execute_reply": "2024-09-11T02:32:36.603981Z"
}
},
"outputs": [],
"source": [
"_filter = Operation(operator=Operator.AND, arguments=comparisons)"
@ -124,9 +166,16 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 7,
"id": "e4c0b2ce",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.605267Z",
"iopub.status.busy": "2024-09-11T02:32:36.605190Z",
"iopub.status.idle": "2024-09-11T02:32:36.607993Z",
"shell.execute_reply": "2024-09-11T02:32:36.607796Z"
}
},
"outputs": [
{
"data": {
@ -135,7 +184,7 @@
" {'term': {'metadata.author.keyword': 'LangChain'}}]}}"
]
},
"execution_count": 18,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@ -146,9 +195,16 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 8,
"id": "d75455ae",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:32:36.609091Z",
"iopub.status.busy": "2024-09-11T02:32:36.609012Z",
"iopub.status.idle": "2024-09-11T02:32:36.611075Z",
"shell.execute_reply": "2024-09-11T02:32:36.610869Z"
}
},
"outputs": [
{
"data": {
@ -156,7 +212,7 @@
"{'$and': [{'start_year': {'$gt': 2022}}, {'author': {'$eq': 'LangChain'}}]}"
]
},
"execution_count": 19,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@ -182,7 +238,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -35,7 +35,14 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.329739Z",
"iopub.status.busy": "2024-09-11T02:33:48.329033Z",
"iopub.status.idle": "2024-09-11T02:33:48.334555Z",
"shell.execute_reply": "2024-09-11T02:33:48.334086Z"
}
},
"outputs": [],
"source": [
"# %pip install -qU langchain-core langchain-openai"
@ -53,15 +60,23 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.337140Z",
"iopub.status.busy": "2024-09-11T02:33:48.336958Z",
"iopub.status.idle": "2024-09-11T02:33:48.342671Z",
"shell.execute_reply": "2024-09-11T02:33:48.342281Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@ -80,14 +95,21 @@
},
{
"cell_type": "code",
"execution_count": 37,
"execution_count": 3,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.345004Z",
"iopub.status.busy": "2024-09-11T02:33:48.344838Z",
"iopub.status.idle": "2024-09-11T02:33:48.413166Z",
"shell.execute_reply": "2024-09-11T02:33:48.412908Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"sub_queries_description = \"\"\"\\\n",
"If the original question contains multiple distinct sub-questions, \\\n",
@ -121,9 +143,16 @@
},
{
"cell_type": "code",
"execution_count": 64,
"execution_count": 4,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:48.414805Z",
"iopub.status.busy": "2024-09-11T02:33:48.414700Z",
"iopub.status.idle": "2024-09-11T02:33:49.023858Z",
"shell.execute_reply": "2024-09-11T02:33:49.023547Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
@ -143,7 +172,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@ -158,17 +187,24 @@
},
{
"cell_type": "code",
"execution_count": 65,
"execution_count": 5,
"id": "0bcfce06-6f0c-4f9d-a1fc-dc29342d2aae",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:49.025536Z",
"iopub.status.busy": "2024-09-11T02:33:49.025437Z",
"iopub.status.idle": "2024-09-11T02:33:50.170550Z",
"shell.execute_reply": "2024-09-11T02:33:50.169835Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='web voyager vs reflection agents', sub_queries=['difference between web voyager and reflection agents', 'do web voyager and reflection agents use langgraph'], publish_year=None)"
"Search(query='difference between web voyager and reflection agents', sub_queries=['what is web voyager', 'what are reflection agents', 'do both web voyager and reflection agents use langgraph?'], publish_year=None)"
]
},
"execution_count": 65,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@ -193,9 +229,16 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 6,
"id": "15b4923d-a08e-452d-8889-9a09a57d1095",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.180367Z",
"iopub.status.busy": "2024-09-11T02:33:50.173961Z",
"iopub.status.idle": "2024-09-11T02:33:50.186703Z",
"shell.execute_reply": "2024-09-11T02:33:50.186090Z"
}
},
"outputs": [],
"source": [
"examples = []"
@ -203,9 +246,16 @@
},
{
"cell_type": "code",
"execution_count": 54,
"execution_count": 7,
"id": "da5330e6-827a-40e5-982b-b23b6286b758",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.189822Z",
"iopub.status.busy": "2024-09-11T02:33:50.189617Z",
"iopub.status.idle": "2024-09-11T02:33:50.195116Z",
"shell.execute_reply": "2024-09-11T02:33:50.194617Z"
}
},
"outputs": [],
"source": [
"question = \"What's chat langchain, is it a langchain template?\"\n",
@ -218,9 +268,16 @@
},
{
"cell_type": "code",
"execution_count": 55,
"execution_count": 8,
"id": "580e857a-27df-4ecf-a19c-458dc9244ec8",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.198178Z",
"iopub.status.busy": "2024-09-11T02:33:50.198002Z",
"iopub.status.idle": "2024-09-11T02:33:50.204115Z",
"shell.execute_reply": "2024-09-11T02:33:50.202534Z"
}
},
"outputs": [],
"source": [
"question = \"How to build multi-agent system and stream intermediate steps from it\"\n",
@ -238,9 +295,16 @@
},
{
"cell_type": "code",
"execution_count": 56,
"execution_count": 9,
"id": "fa63310d-69e3-4701-825c-fbb01f8a5a16",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.207416Z",
"iopub.status.busy": "2024-09-11T02:33:50.207196Z",
"iopub.status.idle": "2024-09-11T02:33:50.212484Z",
"shell.execute_reply": "2024-09-11T02:33:50.211974Z"
}
},
"outputs": [],
"source": [
"question = \"LangChain agents vs LangGraph?\"\n",
@ -266,9 +330,16 @@
},
{
"cell_type": "code",
"execution_count": 57,
"execution_count": 10,
"id": "68b03709-9a60-4acf-b96c-cafe1056c6f3",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.215540Z",
"iopub.status.busy": "2024-09-11T02:33:50.215250Z",
"iopub.status.idle": "2024-09-11T02:33:50.224108Z",
"shell.execute_reply": "2024-09-11T02:33:50.223490Z"
}
},
"outputs": [],
"source": [
"import uuid\n",
@ -313,9 +384,16 @@
},
{
"cell_type": "code",
"execution_count": 58,
"execution_count": 11,
"id": "d9bf9f87-3e6b-4fc2-957b-949b077fab54",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.227215Z",
"iopub.status.busy": "2024-09-11T02:33:50.226993Z",
"iopub.status.idle": "2024-09-11T02:33:50.231333Z",
"shell.execute_reply": "2024-09-11T02:33:50.230742Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import MessagesPlaceholder\n",
@ -329,17 +407,24 @@
},
{
"cell_type": "code",
"execution_count": 62,
"execution_count": 12,
"id": "e565ccb0-3530-4782-b56b-d1f6d0a8e559",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:33:50.233833Z",
"iopub.status.busy": "2024-09-11T02:33:50.233646Z",
"iopub.status.idle": "2024-09-11T02:33:51.318133Z",
"shell.execute_reply": "2024-09-11T02:33:51.317640Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='Difference between web voyager and reflection agents, do they both use LangGraph?', sub_queries=['What is Web Voyager', 'What are Reflection agents', 'Do Web Voyager and Reflection agents use LangGraph'], publish_year=None)"
"Search(query=\"What's the difference between web voyager and reflection agents? Do both use langgraph?\", sub_queries=['What is web voyager', 'What are reflection agents', 'Do web voyager and reflection agents use langgraph?'], publish_year=None)"
]
},
"execution_count": 62,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@ -377,7 +462,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

View File

@ -33,12 +33,20 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai faker langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma"
]
},
{
@ -53,15 +61,23 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.036110Z",
"iopub.status.busy": "2024-09-11T02:34:54.035829Z",
"iopub.status.idle": "2024-09-11T02:34:54.038746Z",
"shell.execute_reply": "2024-09-11T02:34:54.038430Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@ -80,9 +96,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "e5ba65c2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.040738Z",
"iopub.status.busy": "2024-09-11T02:34:54.040515Z",
"iopub.status.idle": "2024-09-11T02:34:54.622643Z",
"shell.execute_reply": "2024-09-11T02:34:54.622382Z"
}
},
"outputs": [],
"source": [
"from faker import Faker\n",
@ -102,17 +125,24 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "c901ea97",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.624195Z",
"iopub.status.busy": "2024-09-11T02:34:54.624106Z",
"iopub.status.idle": "2024-09-11T02:34:54.627231Z",
"shell.execute_reply": "2024-09-11T02:34:54.626971Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"'Hayley Gonzalez'"
"'Jacob Adams'"
]
},
"execution_count": 2,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@ -123,17 +153,24 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "b0d42ae2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.628545Z",
"iopub.status.busy": "2024-09-11T02:34:54.628460Z",
"iopub.status.idle": "2024-09-11T02:34:54.630474Z",
"shell.execute_reply": "2024-09-11T02:34:54.630282Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"'Jesse Knight'"
"'Eric Acevedo'"
]
},
"execution_count": 3,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@ -154,19 +191,33 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "0ae69afc",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.631758Z",
"iopub.status.busy": "2024-09-11T02:34:54.631678Z",
"iopub.status.idle": "2024-09-11T02:34:54.666448Z",
"shell.execute_reply": "2024-09-11T02:34:54.666216Z"
}
},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field"
"from pydantic import BaseModel, Field, model_validator"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "6c9485ce",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.667852Z",
"iopub.status.busy": "2024-09-11T02:34:54.667733Z",
"iopub.status.idle": "2024-09-11T02:34:54.700224Z",
"shell.execute_reply": "2024-09-11T02:34:54.700004Z"
}
},
"outputs": [],
"source": [
"class Search(BaseModel):\n",
@ -176,19 +227,17 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "aebd704a",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change.\n",
" warn_beta(\n"
]
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:54.701556Z",
"iopub.status.busy": "2024-09-11T02:34:54.701465Z",
"iopub.status.idle": "2024-09-11T02:34:55.179986Z",
"shell.execute_reply": "2024-09-11T02:34:55.179640Z"
}
],
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
@ -201,7 +250,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@ -216,17 +265,24 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 9,
"id": "cc0d344b",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:55.181603Z",
"iopub.status.busy": "2024-09-11T02:34:55.181500Z",
"iopub.status.idle": "2024-09-11T02:34:55.778884Z",
"shell.execute_reply": "2024-09-11T02:34:55.778324Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jesse Knight')"
"Search(query='aliens', author='Jesse Knight')"
]
},
"execution_count": 33,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@ -245,17 +301,24 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 10,
"id": "82b6b2ad",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:55.784266Z",
"iopub.status.busy": "2024-09-11T02:34:55.782603Z",
"iopub.status.idle": "2024-09-11T02:34:56.206779Z",
"shell.execute_reply": "2024-09-11T02:34:56.206068Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jess Knight')"
"Search(query='aliens', author='Jess Knight')"
]
},
"execution_count": 34,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -276,9 +339,16 @@
},
{
"cell_type": "code",
"execution_count": 35,
"execution_count": 11,
"id": "98788a94",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:56.210043Z",
"iopub.status.busy": "2024-09-11T02:34:56.209657Z",
"iopub.status.idle": "2024-09-11T02:34:56.213962Z",
"shell.execute_reply": "2024-09-11T02:34:56.213413Z"
}
},
"outputs": [],
"source": [
"system = \"\"\"Generate a relevant search query for a library system.\n",
@ -299,9 +369,16 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 12,
"id": "e65412f5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:56.216144Z",
"iopub.status.busy": "2024-09-11T02:34:56.216005Z",
"iopub.status.idle": "2024-09-11T02:34:56.218754Z",
"shell.execute_reply": "2024-09-11T02:34:56.218416Z"
}
},
"outputs": [],
"source": [
"query_analyzer_all = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
@ -317,18 +394,17 @@
},
{
"cell_type": "code",
"execution_count": 37,
"execution_count": 13,
"id": "696b000f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error code: 400 - {'error': {'message': \"This model's maximum context length is 16385 tokens. However, your messages resulted in 33885 tokens (33855 in the messages, 30 in the functions). Please reduce the length of the messages or functions.\", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}\n"
]
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:56.220827Z",
"iopub.status.busy": "2024-09-11T02:34:56.220680Z",
"iopub.status.idle": "2024-09-11T02:34:58.846872Z",
"shell.execute_reply": "2024-09-11T02:34:58.846273Z"
}
],
},
"outputs": [],
"source": [
"try:\n",
" res = query_analyzer_all.invoke(\"what are books about aliens by jess knight\")\n",
@ -346,9 +422,16 @@
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 14,
"id": "0f0d0757",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:58.850318Z",
"iopub.status.busy": "2024-09-11T02:34:58.850100Z",
"iopub.status.idle": "2024-09-11T02:34:58.873883Z",
"shell.execute_reply": "2024-09-11T02:34:58.873525Z"
}
},
"outputs": [],
"source": [
"llm_long = ChatOpenAI(model=\"gpt-4-turbo-preview\", temperature=0)\n",
@ -358,17 +441,24 @@
},
{
"cell_type": "code",
"execution_count": 39,
"execution_count": 15,
"id": "03e5b7b2",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:34:58.875940Z",
"iopub.status.busy": "2024-09-11T02:34:58.875811Z",
"iopub.status.idle": "2024-09-11T02:35:02.947273Z",
"shell.execute_reply": "2024-09-11T02:35:02.946220Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='aliens', author='Kevin Knight')"
"Search(query='aliens', author='jess knight')"
]
},
"execution_count": 39,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@ -389,9 +479,16 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 16,
"id": "32b19e07",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:02.951939Z",
"iopub.status.busy": "2024-09-11T02:35:02.951583Z",
"iopub.status.idle": "2024-09-11T02:35:41.777839Z",
"shell.execute_reply": "2024-09-11T02:35:41.777392Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@ -403,9 +500,16 @@
},
{
"cell_type": "code",
"execution_count": 51,
"execution_count": 17,
"id": "774cb7b0",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.780883Z",
"iopub.status.busy": "2024-09-11T02:35:41.780774Z",
"iopub.status.idle": "2024-09-11T02:35:41.782739Z",
"shell.execute_reply": "2024-09-11T02:35:41.782498Z"
}
},
"outputs": [],
"source": [
"def select_names(question):\n",
@ -416,9 +520,16 @@
},
{
"cell_type": "code",
"execution_count": 52,
"execution_count": 18,
"id": "1173159c",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.783992Z",
"iopub.status.busy": "2024-09-11T02:35:41.783913Z",
"iopub.status.idle": "2024-09-11T02:35:41.785911Z",
"shell.execute_reply": "2024-09-11T02:35:41.785632Z"
}
},
"outputs": [],
"source": [
"create_prompt = {\n",
@ -429,9 +540,16 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 19,
"id": "0a892607",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.787082Z",
"iopub.status.busy": "2024-09-11T02:35:41.787008Z",
"iopub.status.idle": "2024-09-11T02:35:41.788543Z",
"shell.execute_reply": "2024-09-11T02:35:41.788362Z"
}
},
"outputs": [],
"source": [
"query_analyzer_select = create_prompt | structured_llm"
@ -439,17 +557,24 @@
},
{
"cell_type": "code",
"execution_count": 54,
"execution_count": 20,
"id": "8195d7cd",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:41.789624Z",
"iopub.status.busy": "2024-09-11T02:35:41.789551Z",
"iopub.status.idle": "2024-09-11T02:35:42.099839Z",
"shell.execute_reply": "2024-09-11T02:35:42.099042Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptValue(messages=[SystemMessage(content='Generate a relevant search query for a library system.\\n\\n`author` attribute MUST be one of:\\n\\nJesse Knight, Kelly Knight, Scott Knight, Richard Knight, Andrew Knight, Katherine Knight, Erica Knight, Ashley Knight, Becky Knight, Kevin Knight\\n\\nDo NOT hallucinate author name!'), HumanMessage(content='what are books by jess knight')])"
"ChatPromptValue(messages=[SystemMessage(content='Generate a relevant search query for a library system.\\n\\n`author` attribute MUST be one of:\\n\\nJennifer Knight, Jill Knight, John Knight, Dr. Jeffrey Knight, Christopher Knight, Andrea Knight, Brandy Knight, Jennifer Keller, Becky Chambers, Sarah Knapp\\n\\nDo NOT hallucinate author name!'), HumanMessage(content='what are books by jess knight')])"
]
},
"execution_count": 54,
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@ -460,17 +585,24 @@
},
{
"cell_type": "code",
"execution_count": 55,
"execution_count": 21,
"id": "d3228b4e",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.106571Z",
"iopub.status.busy": "2024-09-11T02:35:42.105861Z",
"iopub.status.idle": "2024-09-11T02:35:42.909738Z",
"shell.execute_reply": "2024-09-11T02:35:42.908875Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jesse Knight')"
"Search(query='books about aliens', author='Jennifer Knight')"
]
},
"execution_count": 55,
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
@ -492,28 +624,45 @@
},
{
"cell_type": "code",
"execution_count": 47,
"execution_count": 22,
"id": "a2e8b434",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.915376Z",
"iopub.status.busy": "2024-09-11T02:35:42.914923Z",
"iopub.status.idle": "2024-09-11T02:35:42.923958Z",
"shell.execute_reply": "2024-09-11T02:35:42.922391Z"
}
},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import validator\n",
"\n",
"\n",
"class Search(BaseModel):\n",
" query: str\n",
" author: str\n",
"\n",
" @validator(\"author\")\n",
" def double(cls, v: str) -> str:\n",
" return vectorstore.similarity_search(v, k=1)[0].page_content"
" @model_validator(mode=\"before\")\n",
" @classmethod\n",
" def double(cls, values: dict) -> dict:\n",
" author = values[\"author\"]\n",
" closest_valid_author = vectorstore.similarity_search(author, k=1)[\n",
" 0\n",
" ].page_content\n",
" values[\"author\"] = closest_valid_author\n",
" return values"
]
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 23,
"id": "919c0601",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.927718Z",
"iopub.status.busy": "2024-09-11T02:35:42.927428Z",
"iopub.status.idle": "2024-09-11T02:35:42.933784Z",
"shell.execute_reply": "2024-09-11T02:35:42.933344Z"
}
},
"outputs": [],
"source": [
"system = \"\"\"Generate a relevant search query for a library system\"\"\"\n",
@ -531,17 +680,24 @@
},
{
"cell_type": "code",
"execution_count": 50,
"execution_count": 24,
"id": "6c4f3e9a",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:42.936506Z",
"iopub.status.busy": "2024-09-11T02:35:42.936186Z",
"iopub.status.idle": "2024-09-11T02:35:43.711754Z",
"shell.execute_reply": "2024-09-11T02:35:43.710695Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='books about aliens', author='Jesse Knight')"
"Search(query='aliens', author='John Knight')"
]
},
"execution_count": 50,
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
@ -552,9 +708,16 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 25,
"id": "a309cb11",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:35:43.717567Z",
"iopub.status.busy": "2024-09-11T02:35:43.717189Z",
"iopub.status.idle": "2024-09-11T02:35:43.722339Z",
"shell.execute_reply": "2024-09-11T02:35:43.720537Z"
}
},
"outputs": [],
"source": [
"# TODO: show trigram similarity"
@ -563,9 +726,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-311",
"language": "python",
"name": "python3"
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
@ -577,7 +740,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,


@ -33,10 +33,25 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:53.160868Z",
"iopub.status.busy": "2024-09-11T02:41:53.160512Z",
"iopub.status.idle": "2024-09-11T02:41:57.605370Z",
"shell.execute_reply": "2024-09-11T02:41:57.604888Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai langchain-chroma"
]
},
{
@ -51,15 +66,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:57.607874Z",
"iopub.status.busy": "2024-09-11T02:41:57.607697Z",
"iopub.status.idle": "2024-09-11T02:41:57.610422Z",
"shell.execute_reply": "2024-09-11T02:41:57.610012Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@ -78,9 +101,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "1f621694",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:57.612276Z",
"iopub.status.busy": "2024-09-11T02:41:57.612146Z",
"iopub.status.idle": "2024-09-11T02:41:59.074590Z",
"shell.execute_reply": "2024-09-11T02:41:59.074052Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@ -108,14 +138,21 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.077712Z",
"iopub.status.busy": "2024-09-11T02:41:59.077514Z",
"iopub.status.idle": "2024-09-11T02:41:59.081509Z",
"shell.execute_reply": "2024-09-11T02:41:59.081112Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Search(BaseModel):\n",
@ -129,19 +166,17 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/harrisonchase/workplace/langchain/libs/core/langchain_core/_api/beta_decorator.py:86: LangChainBetaWarning: The function `with_structured_output` is in beta. It is actively being worked on, so the API may change.\n",
" warn_beta(\n"
]
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.083613Z",
"iopub.status.busy": "2024-09-11T02:41:59.083492Z",
"iopub.status.idle": "2024-09-11T02:41:59.204636Z",
"shell.execute_reply": "2024-09-11T02:41:59.204377Z"
}
],
},
"outputs": [],
"source": [
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
@ -159,7 +194,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@ -174,17 +209,24 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "bc1d3863",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.206178Z",
"iopub.status.busy": "2024-09-11T02:41:59.206101Z",
"iopub.status.idle": "2024-09-11T02:41:59.817758Z",
"shell.execute_reply": "2024-09-11T02:41:59.817310Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(queries=['Harrison work location'])"
"Search(queries=['Harrison Work', 'Harrison employment history'])"
]
},
"execution_count": 4,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@ -195,17 +237,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "af62af17-4f90-4dbd-a8b4-dfff51f1db95",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:41:59.820168Z",
"iopub.status.busy": "2024-09-11T02:41:59.819990Z",
"iopub.status.idle": "2024-09-11T02:42:00.309034Z",
"shell.execute_reply": "2024-09-11T02:42:00.308578Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(queries=['Harrison work place', 'Ankush work place'])"
"Search(queries=['Harrison work history', 'Ankush work history'])"
]
},
"execution_count": 5,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@ -226,9 +275,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "1e047d87",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:00.311131Z",
"iopub.status.busy": "2024-09-11T02:42:00.310972Z",
"iopub.status.idle": "2024-09-11T02:42:00.313365Z",
"shell.execute_reply": "2024-09-11T02:42:00.313025Z"
}
},
"outputs": [],
"source": [
"from langchain_core.runnables import chain"
@ -236,9 +292,16 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 9,
"id": "8dac7866",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:00.315138Z",
"iopub.status.busy": "2024-09-11T02:42:00.315016Z",
"iopub.status.idle": "2024-09-11T02:42:00.317427Z",
"shell.execute_reply": "2024-09-11T02:42:00.317088Z"
}
},
"outputs": [],
"source": [
"@chain\n",
@ -255,17 +318,25 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 10,
"id": "232ad8a7-7990-4066-9228-d35a555f7293",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:00.318951Z",
"iopub.status.busy": "2024-09-11T02:42:00.318829Z",
"iopub.status.idle": "2024-09-11T02:42:01.512855Z",
"shell.execute_reply": "2024-09-11T02:42:01.512321Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Harrison worked at Kensho')]"
"[Document(page_content='Harrison worked at Kensho'),\n",
" Document(page_content='Harrison worked at Kensho')]"
]
},
"execution_count": 33,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -276,9 +347,16 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 11,
"id": "28e14ba5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:01.515743Z",
"iopub.status.busy": "2024-09-11T02:42:01.515400Z",
"iopub.status.idle": "2024-09-11T02:42:02.349930Z",
"shell.execute_reply": "2024-09-11T02:42:02.349382Z"
}
},
"outputs": [
{
"data": {
@ -287,7 +365,7 @@
" Document(page_content='Ankush worked at Facebook')]"
]
},
"execution_count": 34,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@ -321,7 +399,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,


@ -33,10 +33,25 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:13.105266Z",
"iopub.status.busy": "2024-09-11T02:42:13.104556Z",
"iopub.status.idle": "2024-09-11T02:42:17.936922Z",
"shell.execute_reply": "2024-09-11T02:42:17.936478Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai langchain-chroma"
]
},
{
@ -51,15 +66,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:17.939072Z",
"iopub.status.busy": "2024-09-11T02:42:17.938929Z",
"iopub.status.idle": "2024-09-11T02:42:17.941266Z",
"shell.execute_reply": "2024-09-11T02:42:17.940968Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@ -78,9 +101,16 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 3,
"id": "1f621694",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:17.942794Z",
"iopub.status.busy": "2024-09-11T02:42:17.942674Z",
"iopub.status.idle": "2024-09-11T02:42:19.939459Z",
"shell.execute_reply": "2024-09-11T02:42:19.938842Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@ -110,14 +140,21 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 4,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:19.942780Z",
"iopub.status.busy": "2024-09-11T02:42:19.942567Z",
"iopub.status.idle": "2024-09-11T02:42:19.947709Z",
"shell.execute_reply": "2024-09-11T02:42:19.947252Z"
}
},
"outputs": [],
"source": [
"from typing import List, Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Search(BaseModel):\n",
@ -135,9 +172,16 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 5,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:19.949936Z",
"iopub.status.busy": "2024-09-11T02:42:19.949778Z",
"iopub.status.idle": "2024-09-11T02:42:20.073883Z",
"shell.execute_reply": "2024-09-11T02:42:20.073556Z"
}
},
"outputs": [],
"source": [
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
@ -154,7 +198,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.with_structured_output(Search)\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@ -169,17 +213,24 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 6,
"id": "bc1d3863",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:20.075511Z",
"iopub.status.busy": "2024-09-11T02:42:20.075428Z",
"iopub.status.idle": "2024-09-11T02:42:20.902011Z",
"shell.execute_reply": "2024-09-11T02:42:20.901558Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='workplace', person='HARRISON')"
"Search(query='work history', person='HARRISON')"
]
},
"execution_count": 19,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@ -190,17 +241,24 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 7,
"id": "af62af17-4f90-4dbd-a8b4-dfff51f1db95",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:20.904384Z",
"iopub.status.busy": "2024-09-11T02:42:20.904195Z",
"iopub.status.idle": "2024-09-11T02:42:21.468172Z",
"shell.execute_reply": "2024-09-11T02:42:21.467639Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"Search(query='workplace', person='ANKUSH')"
"Search(query='work history', person='ANKUSH')"
]
},
"execution_count": 20,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@ -221,9 +279,16 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 8,
"id": "1e047d87",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.470953Z",
"iopub.status.busy": "2024-09-11T02:42:21.470736Z",
"iopub.status.idle": "2024-09-11T02:42:21.473544Z",
"shell.execute_reply": "2024-09-11T02:42:21.473064Z"
}
},
"outputs": [],
"source": [
"from langchain_core.runnables import chain"
@ -231,9 +296,16 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 9,
"id": "4ec0c7fe",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.476024Z",
"iopub.status.busy": "2024-09-11T02:42:21.475835Z",
"iopub.status.idle": "2024-09-11T02:42:21.478359Z",
"shell.execute_reply": "2024-09-11T02:42:21.477932Z"
}
},
"outputs": [],
"source": [
"retrievers = {\n",
@ -244,9 +316,16 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 10,
"id": "8dac7866",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.480247Z",
"iopub.status.busy": "2024-09-11T02:42:21.480084Z",
"iopub.status.idle": "2024-09-11T02:42:21.482732Z",
"shell.execute_reply": "2024-09-11T02:42:21.482382Z"
}
},
"outputs": [],
"source": [
"@chain\n",
@ -258,9 +337,16 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 11,
"id": "232ad8a7-7990-4066-9228-d35a555f7293",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:21.484480Z",
"iopub.status.busy": "2024-09-11T02:42:21.484361Z",
"iopub.status.idle": "2024-09-11T02:42:22.136704Z",
"shell.execute_reply": "2024-09-11T02:42:22.136244Z"
}
},
"outputs": [
{
"data": {
@ -268,7 +354,7 @@
"[Document(page_content='Harrison worked at Kensho')]"
]
},
"execution_count": 24,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@ -279,9 +365,16 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 12,
"id": "28e14ba5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:22.139305Z",
"iopub.status.busy": "2024-09-11T02:42:22.139106Z",
"iopub.status.idle": "2024-09-11T02:42:23.479739Z",
"shell.execute_reply": "2024-09-11T02:42:23.479170Z"
}
},
"outputs": [
{
"data": {
@ -289,7 +382,7 @@
"[Document(page_content='Ankush worked at Facebook')]"
]
},
"execution_count": 25,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@ -323,7 +416,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,


@ -35,10 +35,25 @@
"cell_type": "code",
"execution_count": 1,
"id": "e168ef5c-e54e-49a6-8552-5502854a6f01",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:33.121714Z",
"iopub.status.busy": "2024-09-11T02:42:33.121392Z",
"iopub.status.idle": "2024-09-11T02:42:36.998607Z",
"shell.execute_reply": "2024-09-11T02:42:36.998126Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# %pip install -qU langchain langchain-community langchain-openai langchain-chroma"
"%pip install -qU langchain langchain-community langchain-openai langchain-chroma"
]
},
{
@ -53,15 +68,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "40e2979e-a818-4b96-ac25-039336f94319",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:37.001017Z",
"iopub.status.busy": "2024-09-11T02:42:37.000859Z",
"iopub.status.idle": "2024-09-11T02:42:37.003704Z",
"shell.execute_reply": "2024-09-11T02:42:37.003335Z"
}
},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
@ -80,9 +103,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "1f621694",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:37.005644Z",
"iopub.status.busy": "2024-09-11T02:42:37.005493Z",
"iopub.status.idle": "2024-09-11T02:42:38.288481Z",
"shell.execute_reply": "2024-09-11T02:42:38.287904Z"
}
},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
@ -110,14 +140,21 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "0b51dd76-820d-41a4-98c8-893f6fe0d1ea",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:38.291700Z",
"iopub.status.busy": "2024-09-11T02:42:38.291468Z",
"iopub.status.idle": "2024-09-11T02:42:38.295796Z",
"shell.execute_reply": "2024-09-11T02:42:38.295205Z"
}
},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"class Search(BaseModel):\n",
@ -131,9 +168,16 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "783c03c3-8c72-4f88-9cf4-5829ce6745d6",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:38.297840Z",
"iopub.status.busy": "2024-09-11T02:42:38.297712Z",
"iopub.status.idle": "2024-09-11T02:42:38.420456Z",
"shell.execute_reply": "2024-09-11T02:42:38.420140Z"
}
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
@ -149,7 +193,7 @@
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
"structured_llm = llm.bind_tools([Search])\n",
"query_analyzer = {\"question\": RunnablePassthrough()} | prompt | structured_llm"
]
@ -164,17 +208,24 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "bc1d3863",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:38.421934Z",
"iopub.status.busy": "2024-09-11T02:42:38.421831Z",
"iopub.status.idle": "2024-09-11T02:42:39.048915Z",
"shell.execute_reply": "2024-09-11T02:42:39.048519Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_ZnoVX4j9Mn8wgChaORyd1cvq', 'function': {'arguments': '{\"query\":\"Harrison\"}', 'name': 'Search'}, 'type': 'function'}]})"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_korLZrh08PTRL94f4L7rFqdj', 'function': {'arguments': '{\"query\":\"Harrison\"}', 'name': 'Search'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 95, 'total_tokens': 109}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_483d39d857', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-ea94d376-37bf-4f80-abe6-e3b42b767ea0-0', tool_calls=[{'name': 'Search', 'args': {'query': 'Harrison'}, 'id': 'call_korLZrh08PTRL94f4L7rFqdj', 'type': 'tool_call'}], usage_metadata={'input_tokens': 95, 'output_tokens': 14, 'total_tokens': 109})"
]
},
"execution_count": 4,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@ -185,17 +236,24 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "af62af17-4f90-4dbd-a8b4-dfff51f1db95",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:39.050923Z",
"iopub.status.busy": "2024-09-11T02:42:39.050785Z",
"iopub.status.idle": "2024-09-11T02:42:40.090421Z",
"shell.execute_reply": "2024-09-11T02:42:40.089454Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Hello! How can I assist you today?')"
"AIMessage(content='Hello! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 93, 'total_tokens': 103}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_483d39d857', 'finish_reason': 'stop', 'logprobs': None}, id='run-ebdfc44a-455a-4ca6-be85-84559886b1e1-0', usage_metadata={'input_tokens': 93, 'output_tokens': 10, 'total_tokens': 103})"
]
},
"execution_count": 5,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@ -216,9 +274,16 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "1e047d87",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:40.093716Z",
"iopub.status.busy": "2024-09-11T02:42:40.093472Z",
"iopub.status.idle": "2024-09-11T02:42:40.097732Z",
"shell.execute_reply": "2024-09-11T02:42:40.097274Z"
}
},
"outputs": [],
"source": [
"from langchain_core.output_parsers.openai_tools import PydanticToolsParser\n",
@ -229,9 +294,16 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 9,
"id": "8dac7866",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:40.100028Z",
"iopub.status.busy": "2024-09-11T02:42:40.099882Z",
"iopub.status.idle": "2024-09-11T02:42:40.103105Z",
"shell.execute_reply": "2024-09-11T02:42:40.102734Z"
}
},
"outputs": [],
"source": [
"@chain\n",
@ -248,9 +320,16 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 10,
"id": "232ad8a7-7990-4066-9228-d35a555f7293",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:40.105092Z",
"iopub.status.busy": "2024-09-11T02:42:40.104917Z",
"iopub.status.idle": "2024-09-11T02:42:41.341967Z",
"shell.execute_reply": "2024-09-11T02:42:41.341455Z"
}
},
"outputs": [
{
"name": "stderr",
@ -265,7 +344,7 @@
"[Document(page_content='Harrison worked at Kensho')]"
]
},
"execution_count": 8,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -276,17 +355,24 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 11,
"id": "28e14ba5",
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-11T02:42:41.344639Z",
"iopub.status.busy": "2024-09-11T02:42:41.344411Z",
"iopub.status.idle": "2024-09-11T02:42:41.798332Z",
"shell.execute_reply": "2024-09-11T02:42:41.798054Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Hello! How can I assist you today?')"
"AIMessage(content='Hello! How can I assist you today?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 93, 'total_tokens': 103}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_483d39d857', 'finish_reason': 'stop', 'logprobs': None}, id='run-e87f058d-30c0-4075-8a89-a01b982d557e-0', usage_metadata={'input_tokens': 93, 'output_tokens': 10, 'total_tokens': 103})"
]
},
"execution_count": 9,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@ -320,7 +406,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.9"
}
},
"nbformat": 4,

Some files were not shown because too many files have changed in this diff