Compare commits

..

2 Commits

Author          SHA1        Message  Date
Eugene Yurtsev  89b0cd8d6c  xx       2024-07-08 17:07:50 -04:00
Eugene Yurtsev  d18651768a  x        2024-07-08 17:02:48 -04:00
617 changed files with 13643 additions and 43596 deletions

View File

@@ -5,10 +5,10 @@ services:
dockerfile: libs/langchain/dev.Dockerfile
context: ..
volumes:
# Update this to wherever you want VS Code to mount the folder of your project
# Update this to wherever you want VS Code to mount the folder of your project
- ..:/workspaces/langchain:cached
networks:
- langchain-network
- langchain-network
# environment:
# MONGO_ROOT_USERNAME: root
# MONGO_ROOT_PASSWORD: example123
@@ -28,3 +28,5 @@ services:
networks:
langchain-network:
driver: bridge

View File

@@ -4,6 +4,9 @@ contact_links:
- name: 🤔 Question or Problem
about: Ask a question or ask about a problem in GitHub Discussions.
url: https://www.github.com/langchain-ai/langchain/discussions/categories/q-a
- name: Discord
url: https://discord.gg/6adMQxSpJS
about: General community discussions
- name: Feature Request
url: https://www.github.com/langchain-ai/langchain/discussions/categories/ideas
about: Suggest a feature or an idea

View File

@@ -6,7 +6,6 @@ import sys
import tomllib
from collections import defaultdict
from typing import Dict, List, Set
from pathlib import Path
LANGCHAIN_DIRS = [
@@ -27,48 +26,17 @@ def all_package_dirs() -> Set[str]:
def dependents_graph() -> dict:
"""
Construct a mapping of package -> dependents, such that we can
run tests on all dependents of a package when a change is made.
"""
dependents = defaultdict(set)
for path in glob.glob("./libs/**/pyproject.toml", recursive=True):
if "template" in path:
continue
# load regular and test deps from pyproject.toml
with open(path, "rb") as f:
pyproject = tomllib.load(f)["tool"]["poetry"]
pkg_dir = "libs" + "/".join(path.split("libs")[1].split("/")[:-1])
for dep in [
*pyproject["dependencies"].keys(),
*pyproject["group"]["test"]["dependencies"].keys(),
]:
for dep in pyproject["dependencies"]:
if "langchain" in dep:
dependents[dep].add(pkg_dir)
continue
# load extended deps from extended_testing_deps.txt
package_path = Path(path).parent
extended_requirement_path = package_path / "extended_testing_deps.txt"
if extended_requirement_path.exists():
with open(extended_requirement_path, "r") as f:
extended_deps = f.read().splitlines()
for depline in extended_deps:
if depline.startswith("-e "):
# editable dependency
assert depline.startswith(
"-e ../partners/"
), "Extended test deps should only editable install partner packages"
partner = depline.split("partners/")[1]
dep = f"langchain-{partner}"
else:
dep = depline.split("==")[0]
if "langchain" in dep:
dependents[dep].add(pkg_dir)
return dependents
@@ -85,44 +53,6 @@ def add_dependents(dirs_to_eval: Set[str], dependents: dict) -> List[str]:
return list(updated)
def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
min_python = "3.8"
max_python = "3.12"
# custom logic for specific directories
if dir_ == "libs/partners/milvus":
# milvus poetry doesn't allow 3.12 because they
# declare deps in funny way
max_python = "3.11"
return [
{"working-directory": dir_, "python-version": min_python},
{"working-directory": dir_, "python-version": max_python},
]
def _get_configs_for_multi_dirs(
job: str, dirs_to_run: List[str], dependents: dict
) -> List[Dict[str, str]]:
if job == "lint":
dirs = add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
dependents,
)
elif job in ["test", "compile-integration-tests", "dependencies"]:
dirs = add_dependents(
dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
)
elif job == "extended-tests":
dirs = list(dirs_to_run["extended-test"])
else:
raise ValueError(f"Unknown job: {job}")
return [
config for dir_ in dirs for config in _get_configs_for_single_dir(job, dir_)
]
if __name__ == "__main__":
files = sys.argv[1:]
@@ -196,23 +126,17 @@ if __name__ == "__main__":
dependents = dependents_graph()
# we now have dirs_by_job
# todo: clean this up
map_job_to_configs = {
job: _get_configs_for_multi_dirs(job, dirs_to_run, dependents)
for job in [
"lint",
"test",
"extended-tests",
"compile-integration-tests",
"dependencies",
]
outputs = {
"dirs-to-lint": add_dependents(
dirs_to_run["lint"] | dirs_to_run["test"] | dirs_to_run["extended-test"],
dependents,
),
"dirs-to-test": add_dependents(
dirs_to_run["test"] | dirs_to_run["extended-test"], dependents
),
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
"docs-edited": "true" if docs_edited else "",
}
map_job_to_configs["test-doc-imports"] = (
[{"python-version": "3.12"}] if docs_edited else []
)
for key, value in map_job_to_configs.items():
for key, value in outputs.items():
json_output = json.dumps(value)
print(f"{key}={json_output}")
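
The check_diff.py hunks above move between two output shapes: per-job config objects that carry both a working-directory and a python-version, and plain directory lists. In both versions the dependents graph widens the set of directories to run, so a change to one package also tests its dependents. A minimal sketch of that widening step, with hypothetical package and directory names (the real graph is built from the pyproject.toml files under libs/):

    from collections import defaultdict
    from typing import Dict, List, Set

    # Hypothetical graph: langchain-core is depended on by langchain and
    # langchain-community, so a change to libs/core also runs those dirs.
    dependents: Dict[str, Set[str]] = defaultdict(set)
    dependents["langchain-core"] |= {"libs/langchain", "libs/community"}
    dependents["langchain"] |= {"libs/experimental"}

    DIR_TO_PKG = {"libs/core": "langchain-core", "libs/langchain": "langchain"}

    def add_dependents(dirs_to_eval: Set[str]) -> List[str]:
        # Widen the changed directories with every directory that depends on
        # them, mirroring the add_dependents() helper in check_diff.py.
        updated = set(dirs_to_eval)
        for dir_ in dirs_to_eval:
            updated |= dependents.get(DIR_TO_PKG.get(dir_, ""), set())
        return sorted(updated)

    print(add_dependents({"libs/core"}))
    # ['libs/community', 'libs/core', 'libs/langchain']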

View File

@@ -1,35 +0,0 @@
import sys
import tomllib
if __name__ == "__main__":
# Get the TOML file path from the command line argument
toml_file = sys.argv[1]
# read toml file
with open(toml_file, "rb") as file:
toml_data = tomllib.load(file)
# see if we're releasing an rc
version = toml_data["tool"]["poetry"]["version"]
releasing_rc = "rc" in version
# if not, iterate through dependencies and make sure none allow prereleases
if not releasing_rc:
dependencies = toml_data["tool"]["poetry"]["dependencies"]
for lib in dependencies:
dep_version = dependencies[lib]
dep_version_string = (
dep_version["version"] if isinstance(dep_version, dict) else dep_version
)
if "rc" in dep_version_string:
raise ValueError(
f"Dependency {lib} has a prerelease version. Please remove this."
)
if isinstance(dep_version, dict) and dep_version.get(
"allow-prereleases", False
):
raise ValueError(
f"Dependency {lib} has allow-prereleases set to true. Please remove this."
)
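
check_prerelease_dependencies.py, dropped on one side of this comparison, guards releases: unless the package itself is an rc, no dependency may pin an rc version or enable allow-prereleases. A compact restatement of that rule with made-up dependency tables (a sketch, not the script itself):

    def check_prereleases(version: str, dependencies: dict) -> None:
        if "rc" in version:
            return  # an rc release may depend on other prereleases
        for lib, dep in dependencies.items():
            dep_version = dep["version"] if isinstance(dep, dict) else dep
            if "rc" in dep_version:
                raise ValueError(f"Dependency {lib} has a prerelease version.")
            if isinstance(dep, dict) and dep.get("allow-prereleases", False):
                raise ValueError(f"Dependency {lib} has allow-prereleases set to true.")

    check_prereleases("0.2.5", {"langchain-core": "^0.2.0"})        # passes
    # check_prereleases("0.2.5", {"langchain-core": "0.3.0rc1"})    # would raise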

View File

@@ -1,11 +1,6 @@
import sys
if sys.version_info >= (3, 11):
import tomllib
else:
# for python 3.10 and below, which doesnt have stdlib tomllib
import tomli as tomllib
import tomllib
from packaging.version import parse as parse_version
import re
@@ -14,11 +9,8 @@ MIN_VERSION_LIBS = [
"langchain-community",
"langchain",
"langchain-text-splitters",
"SQLAlchemy",
]
SKIP_IF_PULL_REQUEST = ["langchain-core"]
def get_min_version(version: str) -> str:
# base regex for x.x.x with cases for rc/post/etc
@@ -45,7 +37,7 @@ def get_min_version(version: str) -> str:
raise ValueError(f"Unrecognized version format: {version}")
def get_min_version_from_toml(toml_path: str, versions_for: str):
def get_min_version_from_toml(toml_path: str):
# Parse the TOML file
with open(toml_path, "rb") as file:
toml_data = tomllib.load(file)
@@ -58,10 +50,6 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
# Iterate over the libs in MIN_VERSION_LIBS
for lib in MIN_VERSION_LIBS:
if versions_for == "pull_request" and lib in SKIP_IF_PULL_REQUEST:
# some libs only get checked on release because of simultaneous
# changes
continue
# Check if the lib is present in the dependencies
if lib in dependencies:
# Get the version string
@@ -82,10 +70,8 @@ def get_min_version_from_toml(toml_path: str, versions_for: str):
if __name__ == "__main__":
# Get the TOML file path from the command line argument
toml_file = sys.argv[1]
versions_for = sys.argv[2]
assert versions_for in ["release", "pull_request"]
# Call the function to get the minimum versions
min_versions = get_min_version_from_toml(toml_file, versions_for)
min_versions = get_min_version_from_toml(toml_file)
print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()]))
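
Whichever side of this diff is taken, get_min_versions.py resolves each Poetry constraint in pyproject.toml to its lowest allowed version so CI can install the oldest supported pins; one side additionally drops the release/pull_request switch that skipped langchain-core on PRs. The space-separated output is consumed by the release workflow, roughly like this (version numbers are illustrative):

    # Illustrative mapping from Poetry constraints to their lower bounds;
    # the real script derives these with a regex that also handles rc/post tags.
    min_versions = {
        "langchain-core": "0.2.10",           # e.g. from ">=0.2.10,<0.3"
        "langchain-text-splitters": "0.2.0",  # e.g. from "^0.2.0"
    }
    print(" ".join(f"{lib}=={version}" for lib, version in min_versions.items()))
    # langchain-core==0.2.10 langchain-text-splitters==0.2.0
    # The workflow captures this line into $GITHUB_OUTPUT and can later run
    # `pip install --force-reinstall $MIN_VERSIONS` to test against those pins.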

View File

@@ -7,10 +7,6 @@ on:
required: true
type: string
description: "From which folder this pipeline executes"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -21,14 +17,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: "poetry run pytest -m compile tests/integration_tests #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "poetry run pytest -m compile tests/integration_tests #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: compile-integration

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,14 +21,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: dependency checks ${{ inputs.python-version }}
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: dependency checks ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: pydantic-cross-compat

View File

@@ -6,10 +6,6 @@ on:
working-directory:
required: true
type: string
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -20,14 +16,19 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: Python ${{ inputs.python-version }}
strategy:
matrix:
python-version:
- "3.8"
- "3.11"
name: Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,15 +21,27 @@ env:
jobs:
build:
name: "make lint #${{ inputs.python-version }}"
name: "make lint #${{ matrix.python-version }}"
runs-on: ubuntu-latest
strategy:
matrix:
# Only lint on the min and max supported Python versions.
# It's extremely unlikely that there's a lint issue on any version in between
# that doesn't show up on the min or max versions.
#
# GitHub rate-limits how many jobs can be running at any one time.
# Starting new jobs is also relatively slow,
# so linting on fewer versions makes CI faster.
python-version:
- "3.8"
- "3.12"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: lint-with-extras
@@ -78,7 +86,7 @@ jobs:
with:
path: |
${{ env.WORKDIR }}/.mypy_cache
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
- name: Analysing the code with our lint
@@ -112,7 +120,7 @@ jobs:
with:
path: |
${{ env.WORKDIR }}/.mypy_cache_test
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ inputs.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
- name: Analysing the code with our lint
working-directory: ${{ inputs.working-directory }}

View File

@@ -122,6 +122,7 @@ jobs:
fi
{
echo 'release-body<<EOF'
echo "# Release $TAG"
echo $PREAMBLE
echo
git log --format="%s" "$PREV_TAG"..HEAD -- $WORKING_DIR
@@ -189,7 +190,7 @@ jobs:
--extra-index-url https://test.pypi.org/simple/ \
"$PKG_NAME==$VERSION" || \
( \
sleep 15 && \
sleep 5 && \
poetry run pip install \
--extra-index-url https://test.pypi.org/simple/ \
"$PKG_NAME==$VERSION" \
@@ -221,17 +222,12 @@ jobs:
run: make tests
working-directory: ${{ inputs.working-directory }}
- name: Check for prerelease versions
working-directory: ${{ inputs.working-directory }}
run: |
poetry run python $GITHUB_WORKSPACE/.github/scripts/check_prerelease_dependencies.py pyproject.toml
- name: Get minimum versions
working-directory: ${{ inputs.working-directory }}
id: min-version
run: |
poetry run pip install packaging
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release)"
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml)"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"

View File

@@ -11,10 +11,6 @@ on:
required: false
type: string
description: "Relative path to the langchain library folder"
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -25,14 +21,22 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
name: "make test #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
name: "make test #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
cache-key: core
@@ -65,22 +69,3 @@ jobs:
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'
- name: Get minimum versions
working-directory: ${{ inputs.working-directory }}
id: min-version
run: |
poetry run pip install packaging tomli
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request)"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"
# Temporarily disabled until we can get the minimum versions working
# - name: Run unit tests with minimum dependency versions
# if: ${{ steps.min-version.outputs.min-versions != '' }}
# env:
# MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
# run: |
# poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
# make tests
# working-directory: ${{ inputs.working-directory }}

View File

@@ -2,11 +2,6 @@ name: test_doc_imports
on:
workflow_call:
inputs:
python-version:
required: true
type: string
description: "Python version to use"
env:
POETRY_VERSION: "1.7.1"
@@ -14,14 +9,18 @@ env:
jobs:
build:
runs-on: ubuntu-latest
name: "check doc imports #${{ inputs.python-version }}"
strategy:
matrix:
python-version:
- "3.12"
name: "check doc imports #${{ matrix.python-version }}"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ inputs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
cache-key: core

View File

@@ -33,96 +33,91 @@ jobs:
run: |
python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
outputs:
lint: ${{ steps.set-matrix.outputs.lint }}
test: ${{ steps.set-matrix.outputs.test }}
extended-tests: ${{ steps.set-matrix.outputs.extended-tests }}
compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
dependencies: ${{ steps.set-matrix.outputs.dependencies }}
test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
lint:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.lint != '[]' }}
if: ${{ needs.build.outputs.dirs-to-lint != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.lint) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-lint) }}
uses: ./.github/workflows/_lint.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.test != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test-doc-imports:
needs: [ build ]
if: ${{ needs.build.outputs.test-doc-imports != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.test-doc-imports) }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
uses: ./.github/workflows/_test_doc_imports.yml
secrets: inherit
with:
python-version: ${{ matrix.job-configs.python-version }}
compile-integration-tests:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.compile-integration-tests != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_compile_integration_test.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
dependencies:
name: cd ${{ matrix.job-configs.working-directory }}
name: cd ${{ matrix.working-directory }}
needs: [ build ]
if: ${{ needs.build.outputs.dependencies != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
strategy:
matrix:
job-configs: ${{ fromJson(needs.build.outputs.dependencies) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-test) }}
uses: ./.github/workflows/_dependencies.yml
with:
working-directory: ${{ matrix.job-configs.working-directory }}
python-version: ${{ matrix.job-configs.python-version }}
working-directory: ${{ matrix.working-directory }}
secrets: inherit
extended-tests:
name: "cd ${{ matrix.job-configs.working-directory }} / make extended_tests #${{ matrix.job-configs.python-version }}"
name: "cd ${{ matrix.working-directory }} / make extended_tests #${{ matrix.python-version }}"
needs: [ build ]
if: ${{ needs.build.outputs.extended-tests != '[]' }}
if: ${{ needs.build.outputs.dirs-to-extended-test != '[]' }}
strategy:
matrix:
# note different variable for extended test dirs
job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
working-directory: ${{ fromJson(needs.build.outputs.dirs-to-extended-test) }}
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ matrix.job-configs.working-directory }}
working-directory: ${{ matrix.working-directory }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.job-configs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
- name: Set up Python ${{ matrix.python-version }} + Poetry ${{ env.POETRY_VERSION }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.job-configs.python-version }}
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ matrix.job-configs.working-directory }}
working-directory: ${{ matrix.working-directory }}
cache-key: extended
- name: Install dependencies
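
One side of this workflow diff consumes plain directory lists (dirs-to-lint, dirs-to-test, dirs-to-extended-test, docs-edited) and keeps the Python-version matrix inside the reusable workflows; the other consumes per-job config objects via job-configs. Either way, the handoff from check_diff.py is the same: each `key=json` line the script prints becomes a job output that fromJson turns back into a matrix. A rough sketch of that handoff (directory names are examples):

    import json

    # What check_diff.py might print for a change touching libs/core only:
    outputs = {
        "dirs-to-lint": ["libs/core", "libs/langchain"],
        "dirs-to-test": ["libs/core", "libs/langchain"],
        "dirs-to-extended-test": [],
    }
    for key, value in outputs.items():
        print(f"{key}={json.dumps(value)}")
    # dirs-to-lint=["libs/core", "libs/langchain"]
    # Each line is appended to $GITHUB_OUTPUT; fromJson(needs.build.outputs.dirs-to-lint)
    # then turns the JSON list back into a matrix with one job per directory.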

View File

@@ -11,6 +11,7 @@
[![Open Issues](https://img.shields.io/github/issues-raw/langchain-ai/langchain?style=flat-square)](https://github.com/langchain-ai/langchain/issues)
[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode&style=flat-square)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/langchain-ai/langchain)
[![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS)
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
Looking for the JS/TS library? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).

View File

@@ -64,7 +64,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
]
},
{
@@ -355,7 +355,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

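Most of the notebook diffs that follow toggle between the dedicated langchain-chroma package and the older langchain_community import path for the same Chroma vector store. A minimal sketch of the two interchangeable imports, assuming an OpenAI API key is configured (the sample text is taken from the Intel earnings notebook later in this comparison):

    from langchain_chroma import Chroma          # newer, dedicated package
    # from langchain_community.vectorstores import Chroma   # older import path
    from langchain_openai import OpenAIEmbeddings

    # Build a tiny in-memory collection and query it.
    vectorstore = Chroma.from_texts(
        ["Client Computing Group (CCG) revenue was $7.5 billion, up 31%."],
        embedding=OpenAIEmbeddings(),
    )
    print(vectorstore.similarity_search("CCG revenue", k=1)[0].page_content)
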
View File

@@ -37,7 +37,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -U --quiet langchain langchain-chroma langchain-community openai langchain-experimental\n",
"%pip install -U --quiet langchain langchain_community openai chromadb langchain-experimental\n",
"%pip install --quiet \"unstructured[all-docs]\" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken"
]
},
@@ -344,8 +344,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import VertexAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub langchain-chroma langchain-anthropic"
"pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub chromadb langchain-anthropic"
]
},
{
@@ -645,7 +645,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"# Initialize all_texts with leaf_texts\n",
"all_texts = leaf_texts.copy()\n",

View File

@@ -57,6 +57,4 @@ Notebook | Description
[two_agent_debate_tools.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_agent_debate_tools.ipynb) | Simulate multi-agent dialogues where the agents can utilize various tools.
[two_player_dnd.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/two_player_dnd.ipynb) | Simulate a two-player dungeons & dragons game, where a dialogue simulator class is used to coordinate the dialogue between the protagonist and the dungeon master.
[wikibase_agent.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/wikibase_agent.ipynb) | Create a simple wikibase agent that utilizes sparql generation, with testing done on http://wikidata.org.
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.
[rag-locally-on-intel-cpu.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/rag-locally-on-intel-cpu.ipynb) | Perform Retrieval-Augmented-Generation (RAG) on locally downloaded open-source models using langchain and open source tools and execute it on Intel Xeon CPU. We showed an example of how to apply RAG on Llama 2 model and enable it to answer the queries related to Intel Q1 2024 earnings release.
[visual_RAG_vdms.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/visual_RAG_vdms.ipynb) | Performs Visual Retrieval-Augmented-Generation (RAG) using videos and scene descriptions generated by open source models.
[oracleai_demo.ipynb](https://github.com/langchain-ai/langchain/tree/master/cookbook/oracleai_demo.ipynb) | This guide outlines how to utilize Oracle AI Vector Search alongside Langchain for an end-to-end RAG pipeline, providing step-by-step examples. The process includes loading documents from various sources using OracleDocLoader, summarizing them either within or outside the database with OracleSummary, and generating embeddings similarly through OracleEmbeddings. It also covers chunking documents according to specific requirements using Advanced Oracle Capabilities from OracleTextSplitter, and finally, storing and indexing these documents in a Vector Store for querying with OracleVS.

View File

@@ -39,7 +39,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml langchainhub"
"! pip install langchain unstructured[all-docs] pydantic lxml langchainhub"
]
},
{
@@ -320,7 +320,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

View File

@@ -59,7 +59,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
"! pip install langchain unstructured[all-docs] pydantic lxml"
]
},
{
@@ -375,7 +375,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

View File

@@ -59,7 +59,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml"
"! pip install langchain unstructured[all-docs] pydantic lxml"
]
},
{
@@ -378,8 +378,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import GPT4AllEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",

View File

@@ -19,7 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain openai langchain_chroma langchain-experimental # (newest versions required for multi-modal)"
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
]
},
{
@@ -132,7 +132,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"baseline = Chroma.from_texts(\n",

View File

@@ -28,7 +28,7 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",

View File

@@ -14,7 +14,7 @@
}
],
"source": [
"%pip install -qU langchain-airbyte langchain_chroma"
"%pip install -qU langchain-airbyte"
]
},
{
@@ -123,7 +123,7 @@
"outputs": [],
"source": [
"import tiktoken\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"enc = tiktoken.get_encoding(\"cl100k_base\")\n",

View File

@@ -39,7 +39,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub langchain-chroma hnswlib --upgrade --quiet"
"! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet"
]
},
{
@@ -547,7 +547,7 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",

View File

@@ -84,7 +84,7 @@
}
],
"source": [
"%pip install --quiet pypdf langchain-chroma tiktoken openai \n",
"%pip install --quiet pypdf chromadb tiktoken openai \n",
"%pip uninstall -y langchain-fireworks\n",
"%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks"
]
@@ -138,7 +138,7 @@
"all_splits = text_splitter.split_documents(data)\n",
"\n",
"# Add to vectorDB\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_fireworks.embeddings import FireworksEmbeddings\n",
"\n",
"vectorstore = Chroma.from_documents(\n",

View File

@@ -170,7 +170,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",
"with open(\"../../state_of_the_union.txt\") as f:\n",

File diff suppressed because one or more lines are too long

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
]
},
{
@@ -30,8 +30,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"urls = [\n",

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph tavily-python"
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph tavily-python"
]
},
{
@@ -77,8 +77,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"urls = [\n",
@@ -180,8 +180,8 @@
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema import Document\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",

View File

@@ -7,7 +7,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
"! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph"
]
},
{
@@ -86,8 +86,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"urls = [\n",
@@ -188,7 +188,7 @@
"from langchain.output_parsers import PydanticOutputParser\n",
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",

View File

@@ -58,7 +58,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)"
"! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)"
]
},
{
@@ -187,7 +187,7 @@
"\n",
"import chromadb\n",
"import numpy as np\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
"from PIL import Image as _PILImage\n",
"\n",

View File

@@ -58,7 +58,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain"
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain"
]
},
{
@@ -167,7 +167,7 @@
"source": [
"import os\n",
"\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_nomic import NomicEmbeddings\n",

View File

@@ -56,7 +56,7 @@
},
"outputs": [],
"source": [
"! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain # (newest versions required for multi-modal)"
"! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)"
]
},
{
@@ -194,7 +194,7 @@
"\n",
"import chromadb\n",
"import numpy as np\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_nomic import NomicEmbeddings\n",
"from PIL import Image as _PILImage\n",
"\n",

View File

@@ -20,8 +20,8 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter"
]

View File

@@ -80,7 +80,7 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings()"

View File

@@ -1,756 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "10f50955-be55-422f-8c62-3a32f8cf02ed",
"metadata": {},
"source": [
"# RAG application running locally on Intel Xeon CPU using langchain and open-source models"
]
},
{
"cell_type": "markdown",
"id": "48113be6-44bb-4aac-aed3-76a1365b9561",
"metadata": {},
"source": [
"Author - Pratool Bharti (pratool.bharti@intel.com)"
]
},
{
"cell_type": "markdown",
"id": "8b10b54b-1572-4ea1-9c1e-1d29fcc3dcd9",
"metadata": {},
"source": [
"In this cookbook, we use langchain tools and open source models to execute locally on CPU. This notebook has been validated to run on Intel Xeon 8480+ CPU. Here we implement a RAG pipeline for Llama2 model to answer questions about Intel Q1 2024 earnings release."
]
},
{
"cell_type": "markdown",
"id": "acadbcec-3468-4926-8ce5-03b678041c0a",
"metadata": {},
"source": [
"**Create a conda or virtualenv environment with python >=3.10 and install following libraries**\n",
"<br>\n",
"\n",
"`pip install --upgrade langchain langchain-community langchainhub langchain-chroma bs4 gpt4all pypdf pysqlite3-binary` <br>\n",
"`pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu`"
]
},
{
"cell_type": "markdown",
"id": "84c392c8-700a-42ec-8e94-806597f22e43",
"metadata": {},
"source": [
"**Load pysqlite3 in sys modules since ChromaDB requires sqlite3.**"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "145cd491-b388-4ea7-bdc8-2f4995cac6fd",
"metadata": {},
"outputs": [],
"source": [
"__import__(\"pysqlite3\")\n",
"import sys\n",
"\n",
"sys.modules[\"sqlite3\"] = sys.modules.pop(\"pysqlite3\")"
]
},
{
"cell_type": "markdown",
"id": "14dde7e2-b236-49b9-b3a0-08c06410418c",
"metadata": {},
"source": [
"**Import essential components from langchain to load and split data**"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "887643ba-249e-48d6-9aa7-d25087e8dfbf",
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_community.document_loaders import PyPDFLoader"
]
},
{
"cell_type": "markdown",
"id": "922c0eba-8736-4de5-bd2f-3d0f00b16e43",
"metadata": {},
"source": [
"**Download Intel Q1 2024 earnings release**"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "2d6a2419-5338-4188-8615-a40a65ff8019",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2024-07-15 15:04:43-- https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf\n",
"Resolving proxy-dmz.intel.com (proxy-dmz.intel.com)... 10.7.211.16\n",
"Connecting to proxy-dmz.intel.com (proxy-dmz.intel.com)|10.7.211.16|:912... connected.\n",
"Proxy request sent, awaiting response... 200 OK\n",
"Length: 133510 (130K) [application/pdf]\n",
"Saving to: intel_q1_2024_earnings.pdf\n",
"\n",
"intel_q1_2024_earni 100%[===================>] 130.38K --.-KB/s in 0.005s \n",
"\n",
"2024-07-15 15:04:44 (24.6 MB/s) - intel_q1_2024_earnings.pdf saved [133510/133510]\n",
"\n"
]
}
],
"source": [
"!wget 'https://d1io3yog0oux5.cloudfront.net/_11d435a500963f99155ee058df09f574/intel/db/887/9014/earnings_release/Q1+24_EarningsRelease_FINAL.pdf' -O intel_q1_2024_earnings.pdf"
]
},
{
"cell_type": "markdown",
"id": "e3612627-e105-453d-8a50-bbd6e39dedb5",
"metadata": {},
"source": [
"**Loading earning release pdf document through PyPDFLoader**"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "cac6278e-ebad-4224-a062-bf6daca24cb0",
"metadata": {},
"outputs": [],
"source": [
"loader = PyPDFLoader(\"intel_q1_2024_earnings.pdf\")\n",
"data = loader.load()"
]
},
{
"cell_type": "markdown",
"id": "a7dca43b-1c62-41df-90c7-6ed2904f823d",
"metadata": {},
"source": [
"**Splitting entire document in several chunks with each chunk size is 500 tokens**"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "4486adbe-0d0e-4685-8c08-c1774ed6e993",
"metadata": {},
"outputs": [],
"source": [
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
"all_splits = text_splitter.split_documents(data)"
]
},
{
"cell_type": "markdown",
"id": "af142346-e793-4a52-9a56-63e3be416b3d",
"metadata": {},
"source": [
"**Looking at the first split of the document**"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e4240fd1-898e-4bfc-a377-02c9bc25b56e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'source': 'intel_q1_2024_earnings.pdf', 'page': 0}, page_content='Intel Corporation\\n2200 Mission College Blvd.\\nSanta Clara, CA 95054-1549\\n \\nNews Release\\n Intel Reports First -Quarter 2024 Financial Results\\nNEWS SUMMARY\\n▪First-quarter revenue of $12.7 billion , up 9% year over year (YoY).\\n▪First-quarter GAAP earnings (loss) per share (EPS) attributable to Intel was $(0.09) ; non-GAAP EPS \\nattributable to Intel was $0.18 .')"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"all_splits[0]"
]
},
{
"cell_type": "markdown",
"id": "b88d2632-7c1b-49ef-a691-c0eb67d23e6a",
"metadata": {},
"source": [
"**One of the major step in RAG is to convert each split of document into embeddings and store in a vector database such that searching relevant documents are efficient.** <br>\n",
"**For that, importing Chroma vector database from langchain. Also, importing open source GPT4All for embedding models**"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "9ff99dd7-9d47-4239-ba0a-d775792334ba",
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import GPT4AllEmbeddings"
]
},
{
"cell_type": "markdown",
"id": "b5d1f4dd-dd8d-4a20-95d1-2dbdd204375a",
"metadata": {},
"source": [
"**In next step, we will download one of the most popular embedding model \"all-MiniLM-L6-v2\". Find more details of the model at this link https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2**"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "05db3494-5d8e-4a13-9941-26330a86f5e5",
"metadata": {},
"outputs": [],
"source": [
"model_name = \"all-MiniLM-L6-v2.gguf2.f16.gguf\"\n",
"gpt4all_kwargs = {\"allow_download\": \"True\"}\n",
"embeddings = GPT4AllEmbeddings(model_name=model_name, gpt4all_kwargs=gpt4all_kwargs)"
]
},
{
"cell_type": "markdown",
"id": "4e53999e-1983-46ac-8039-2783e194c3ae",
"metadata": {},
"source": [
"**Store all the embeddings in the Chroma database**"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "0922951a-9ddf-4761-973d-8e9a86f61284",
"metadata": {},
"outputs": [],
"source": [
"vectorstore = Chroma.from_documents(documents=all_splits, embedding=embeddings)"
]
},
{
"cell_type": "markdown",
"id": "29f94fa0-6c75-4a65-a1a3-debc75422479",
"metadata": {},
"source": [
"**Now, let's find relevant splits from the documents related to the question**"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "88c8152d-ec7a-4f0b-9d86-877789407537",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"4\n"
]
}
],
"source": [
"question = \"What is Intel CCG revenue in Q1 2024\"\n",
"docs = vectorstore.similarity_search(question)\n",
"print(len(docs))"
]
},
{
"cell_type": "markdown",
"id": "53330c6b-cb0f-43f9-b379-2e57ac1e5335",
"metadata": {},
"source": [
"**Look at the first retrieved document from the vector database**"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "43a6d94f-b5c4-47b0-a353-2db4c3d24d9c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(metadata={'page': 1, 'source': 'intel_q1_2024_earnings.pdf'}, page_content='Client Computing Group (CCG) $7.5 billion up31%\\nData Center and AI (DCAI) $3.0 billion up5%\\nNetwork and Edge (NEX) $1.4 billion down 8%\\nTotal Intel Products revenue $11.9 billion up17%\\nIntel Foundry $4.4 billion down 10%\\nAll other:\\nAltera $342 million down 58%\\nMobileye $239 million down 48%\\nOther $194 million up17%\\nTotal all other revenue $775 million down 46%\\nIntersegment eliminations $(4.4) billion\\nTotal net revenue $12.7 billion up9%\\nIntel Products Highlights')"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs[0]"
]
},
{
"cell_type": "markdown",
"id": "64ba074f-4b36-442e-b7e2-b26d6e2815c3",
"metadata": {},
"source": [
"**Download Lllama-2 model from Huggingface and store locally** <br>\n",
"**You can download different quantization variant of Lllama-2 model from the link below. We are using Q8 version here (7.16GB).** <br>\n",
"https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c8dd0811-6f43-4bc6-b854-2ab377639c9a",
"metadata": {},
"outputs": [],
"source": [
"!huggingface-cli download TheBloke/Llama-2-7b-Chat-GGUF llama-2-7b-chat.Q8_0.gguf --local-dir . --local-dir-use-symlinks False"
]
},
{
"cell_type": "markdown",
"id": "3895b1f5-f51d-4539-abf0-af33d7ca48ea",
"metadata": {},
"source": [
"**Import langchain components required to load downloaded LLMs model**"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "fb087088-aa62-44c0-8356-061e9b9f1186",
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.llms import LlamaCpp"
]
},
{
"cell_type": "markdown",
"id": "5a8a111e-2614-4b70-b034-85cd3e7304cb",
"metadata": {},
"source": [
"**Loading the local Lllama-2 model using Llama-cpp library**"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "fb917da2-c0d7-4995-b56d-26254276e0da",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"llama_model_loader: loaded meta data with 19 key-value pairs and 291 tensors from llama-2-7b-chat.Q8_0.gguf (version GGUF V2)\n",
"llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n",
"llama_model_loader: - kv 0: general.architecture str = llama\n",
"llama_model_loader: - kv 1: general.name str = LLaMA v2\n",
"llama_model_loader: - kv 2: llama.context_length u32 = 4096\n",
"llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n",
"llama_model_loader: - kv 4: llama.block_count u32 = 32\n",
"llama_model_loader: - kv 5: llama.feed_forward_length u32 = 11008\n",
"llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n",
"llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n",
"llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 32\n",
"llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000001\n",
"llama_model_loader: - kv 10: general.file_type u32 = 7\n",
"llama_model_loader: - kv 11: tokenizer.ggml.model str = llama\n",
"llama_model_loader: - kv 12: tokenizer.ggml.tokens arr[str,32000] = [\"<unk>\", \"<s>\", \"</s>\", \"<0x00>\", \"<...\n",
"llama_model_loader: - kv 13: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n",
"llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n",
"llama_model_loader: - kv 15: tokenizer.ggml.bos_token_id u32 = 1\n",
"llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32 = 2\n",
"llama_model_loader: - kv 17: tokenizer.ggml.unknown_token_id u32 = 0\n",
"llama_model_loader: - kv 18: general.quantization_version u32 = 2\n",
"llama_model_loader: - type f32: 65 tensors\n",
"llama_model_loader: - type q8_0: 226 tensors\n",
"llm_load_vocab: special tokens cache size = 259\n",
"llm_load_vocab: token to piece cache size = 0.1684 MB\n",
"llm_load_print_meta: format = GGUF V2\n",
"llm_load_print_meta: arch = llama\n",
"llm_load_print_meta: vocab type = SPM\n",
"llm_load_print_meta: n_vocab = 32000\n",
"llm_load_print_meta: n_merges = 0\n",
"llm_load_print_meta: vocab_only = 0\n",
"llm_load_print_meta: n_ctx_train = 4096\n",
"llm_load_print_meta: n_embd = 4096\n",
"llm_load_print_meta: n_layer = 32\n",
"llm_load_print_meta: n_head = 32\n",
"llm_load_print_meta: n_head_kv = 32\n",
"llm_load_print_meta: n_rot = 128\n",
"llm_load_print_meta: n_swa = 0\n",
"llm_load_print_meta: n_embd_head_k = 128\n",
"llm_load_print_meta: n_embd_head_v = 128\n",
"llm_load_print_meta: n_gqa = 1\n",
"llm_load_print_meta: n_embd_k_gqa = 4096\n",
"llm_load_print_meta: n_embd_v_gqa = 4096\n",
"llm_load_print_meta: f_norm_eps = 0.0e+00\n",
"llm_load_print_meta: f_norm_rms_eps = 1.0e-06\n",
"llm_load_print_meta: f_clamp_kqv = 0.0e+00\n",
"llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n",
"llm_load_print_meta: f_logit_scale = 0.0e+00\n",
"llm_load_print_meta: n_ff = 11008\n",
"llm_load_print_meta: n_expert = 0\n",
"llm_load_print_meta: n_expert_used = 0\n",
"llm_load_print_meta: causal attn = 1\n",
"llm_load_print_meta: pooling type = 0\n",
"llm_load_print_meta: rope type = 0\n",
"llm_load_print_meta: rope scaling = linear\n",
"llm_load_print_meta: freq_base_train = 10000.0\n",
"llm_load_print_meta: freq_scale_train = 1\n",
"llm_load_print_meta: n_ctx_orig_yarn = 4096\n",
"llm_load_print_meta: rope_finetuned = unknown\n",
"llm_load_print_meta: ssm_d_conv = 0\n",
"llm_load_print_meta: ssm_d_inner = 0\n",
"llm_load_print_meta: ssm_d_state = 0\n",
"llm_load_print_meta: ssm_dt_rank = 0\n",
"llm_load_print_meta: model type = 7B\n",
"llm_load_print_meta: model ftype = Q8_0\n",
"llm_load_print_meta: model params = 6.74 B\n",
"llm_load_print_meta: model size = 6.67 GiB (8.50 BPW) \n",
"llm_load_print_meta: general.name = LLaMA v2\n",
"llm_load_print_meta: BOS token = 1 '<s>'\n",
"llm_load_print_meta: EOS token = 2 '</s>'\n",
"llm_load_print_meta: UNK token = 0 '<unk>'\n",
"llm_load_print_meta: LF token = 13 '<0x0A>'\n",
"llm_load_print_meta: max token length = 48\n",
"llm_load_tensors: ggml ctx size = 0.14 MiB\n",
"llm_load_tensors: CPU buffer size = 6828.64 MiB\n",
"...................................................................................................\n",
"llama_new_context_with_model: n_ctx = 2048\n",
"llama_new_context_with_model: n_batch = 512\n",
"llama_new_context_with_model: n_ubatch = 512\n",
"llama_new_context_with_model: flash_attn = 0\n",
"llama_new_context_with_model: freq_base = 10000.0\n",
"llama_new_context_with_model: freq_scale = 1\n",
"llama_kv_cache_init: CPU KV buffer size = 1024.00 MiB\n",
"llama_new_context_with_model: KV self size = 1024.00 MiB, K (f16): 512.00 MiB, V (f16): 512.00 MiB\n",
"llama_new_context_with_model: CPU output buffer size = 0.12 MiB\n",
"llama_new_context_with_model: CPU compute buffer size = 164.01 MiB\n",
"llama_new_context_with_model: graph nodes = 1030\n",
"llama_new_context_with_model: graph splits = 1\n",
"AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 0 | \n",
"Model metadata: {'tokenizer.ggml.unknown_token_id': '0', 'tokenizer.ggml.eos_token_id': '2', 'general.architecture': 'llama', 'llama.context_length': '4096', 'general.name': 'LLaMA v2', 'llama.embedding_length': '4096', 'llama.feed_forward_length': '11008', 'llama.attention.layer_norm_rms_epsilon': '0.000001', 'llama.rope.dimension_count': '128', 'llama.attention.head_count': '32', 'tokenizer.ggml.bos_token_id': '1', 'llama.block_count': '32', 'llama.attention.head_count_kv': '32', 'general.quantization_version': '2', 'tokenizer.ggml.model': 'llama', 'general.file_type': '7'}\n",
"Using fallback chat format: llama-2\n"
]
}
],
"source": [
"llm = LlamaCpp(\n",
" model_path=\"llama-2-7b-chat.Q8_0.gguf\",\n",
" n_gpu_layers=-1,\n",
" n_batch=512,\n",
" n_ctx=2048,\n",
" f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls\n",
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "43e06f56-ef97-451b-87d9-8465ea442aed",
"metadata": {},
"source": [
"**Now let's ask the same question to Llama model without showing them the earnings release.**"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "1033dd82-5532-437d-a548-27695e109589",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"?\n",
"(NASDAQ:INTC)\n",
"Intel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year."
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 131.20 ms\n",
"llama_print_timings: sample time = 16.05 ms / 68 runs ( 0.24 ms per token, 4236.76 tokens per second)\n",
"llama_print_timings: prompt eval time = 131.14 ms / 16 tokens ( 8.20 ms per token, 122.01 tokens per second)\n",
"llama_print_timings: eval time = 3225.00 ms / 67 runs ( 48.13 ms per token, 20.78 tokens per second)\n",
"llama_print_timings: total time = 3466.40 ms / 83 tokens\n"
]
},
{
"data": {
"text/plain": [
"\"?\\n(NASDAQ:INTC)\\nIntel's CCG (Client Computing Group) revenue for Q1 2024 was $9.6 billion, a decrease of 35% from the previous quarter and a decrease of 42% from the same period last year.\""
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm.invoke(question)"
]
},
{
"cell_type": "markdown",
"id": "75f5cb10-746f-4e37-9386-b85a4d2b84ef",
"metadata": {},
"source": [
"**As you can see, model is giving wrong information. Correct asnwer is CCG revenue in Q1 2024 is $7.5B. Now let's apply RAG using the earning release document**"
]
},
{
"cell_type": "markdown",
"id": "0f4150ec-5692-4756-b11a-22feb7ab88ff",
"metadata": {},
"source": [
"**in RAG, we modify the input prompt by adding relevent documents with the question. Here, we use one of the popular RAG prompt**"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "226c14b0-f43e-4a1f-a1e4-04731d467ec4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: {question} \\nContext: {context} \\nAnswer:\"))]"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import hub\n",
"\n",
"rag_prompt = hub.pull(\"rlm/rag-prompt\")\n",
"rag_prompt.messages"
]
},
{
"cell_type": "markdown",
"id": "77deb6a0-0950-450a-916a-f2a029676c20",
"metadata": {},
"source": [
"**Appending all retreived documents in a single document**"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "2dbc3327-6ef3-4c1f-8797-0c71964b0921",
"metadata": {},
"outputs": [],
"source": [
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)"
]
},
{
"cell_type": "markdown",
"id": "2e2d9f18-49d0-43a3-bea8-78746ffa86b7",
"metadata": {},
"source": [
"**The last step is to create a chain using langchain tool that will create an e2e pipeline. It will take question and context as an input.**"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "427379c2-51ff-4e0f-8278-a45221363299",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough, RunnablePick\n",
"\n",
"# Chain\n",
"chain = (\n",
" RunnablePassthrough.assign(context=RunnablePick(\"context\") | format_docs)\n",
" | rag_prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "095d6280-c949-4d00-8e32-8895a82d245f",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Llama.generate: prefix-match hit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%."
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 131.20 ms\n",
"llama_print_timings: sample time = 7.74 ms / 31 runs ( 0.25 ms per token, 4004.13 tokens per second)\n",
"llama_print_timings: prompt eval time = 2529.41 ms / 674 tokens ( 3.75 ms per token, 266.46 tokens per second)\n",
"llama_print_timings: eval time = 1542.94 ms / 30 runs ( 51.43 ms per token, 19.44 tokens per second)\n",
"llama_print_timings: total time = 4123.68 ms / 704 tokens\n"
]
},
{
"data": {
"text/plain": [
"' Based on the provided context, Intel CCG revenue in Q1 2024 was $7.5 billion up 31%.'"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"context\": docs, \"question\": question})"
]
},
{
"cell_type": "markdown",
"id": "638364b2-6bd2-4471-9961-d3a1d1b9d4ee",
"metadata": {},
"source": [
"**Now we see the results are correct as it is mentioned in earnings release.** <br>\n",
"**To further automate, we will create a chain that will take input as question and retriever so that we don't need to retrieve documents seperately**"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "4654e5b7-635f-4767-8b31-4c430164cdd5",
"metadata": {},
"outputs": [],
"source": [
"retriever = vectorstore.as_retriever()\n",
"qa_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | rag_prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0979f393-fd0a-4e82-b844-68371c6ad68f",
"metadata": {},
"source": [
"**Now we only need to pass the question to the chain and it will fetch the contexts directly from the vector database to generate the answer**\n",
"<br>\n",
"**Let's try with another question**"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "3ea07b82-e6ec-4084-85f4-191373530172",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Llama.generate: prefix-match hit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%."
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"llama_print_timings: load time = 131.20 ms\n",
"llama_print_timings: sample time = 6.28 ms / 31 runs ( 0.20 ms per token, 4937.88 tokens per second)\n",
"llama_print_timings: prompt eval time = 2681.93 ms / 730 tokens ( 3.67 ms per token, 272.19 tokens per second)\n",
"llama_print_timings: eval time = 1471.07 ms / 30 runs ( 49.04 ms per token, 20.39 tokens per second)\n",
"llama_print_timings: total time = 4206.77 ms / 760 tokens\n"
]
},
{
"data": {
"text/plain": [
"' According to the provided context, Intel DCAI revenue in Q1 2024 was $3.0 billion up 5%.'"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"qa_chain.invoke(\"what is Intel DCAI revenue in Q1 2024?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9407f2a0-4a35-4315-8e96-02fcb80f210c",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "rag-on-intel",
"language": "python",
"name": "rag-on-intel"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -36,10 +36,10 @@
"from bs4 import BeautifulSoup as Soup\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryByteStore, LocalFileStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders.recursive_url_loader import (\n",
" RecursiveUrlLoader,\n",
")\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"# For our example, we'll load docs from the web\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
@@ -370,14 +370,13 @@
],
"source": [
"import torch\n",
"from langchain_huggingface.llms import HuggingFacePipeline\n",
"from optimum.intel.ipex import IPEXModelForCausalLM\n",
"from transformers import AutoTokenizer, pipeline\n",
"from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n",
"from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n",
"\n",
"model_id = \"Intel/neural-chat-7b-v3-3\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
"model = IPEXModelForCausalLM.from_pretrained(\n",
" model_id, torch_dtype=torch.bfloat16, export=True\n",
"model = AutoModelForCausalLM.from_pretrained(\n",
" model_id, device_map=\"auto\", torch_dtype=torch.bfloat16\n",
")\n",
"\n",
"pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=100)\n",
@@ -582,7 +581,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.18"
}
},
"nbformat": 4,

View File

@@ -740,7 +740,7 @@ Even this relatively large model will most likely fail to generate more complica
```bash
poetry run pip install pyyaml langchain_chroma
poetry run pip install pyyaml chromadb
import yaml
```
@@ -994,7 +994,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
from langchain_chroma import Chroma
from langchain_community.vectorstores import Chroma
example_prompt = PromptTemplate(
input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"],

View File

@@ -22,7 +22,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install --quiet pypdf tiktoken openai langchain-chroma langchain-together"
"! pip install --quiet pypdf chromadb tiktoken openai langchain-together"
]
},
{
@@ -45,8 +45,8 @@
"all_splits = text_splitter.split_documents(data)\n",
"\n",
"# Add to vectorDB\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"\"\"\"\n",
"from langchain_together.embeddings import TogetherEmbeddings\n",

File diff suppressed because one or more lines are too long

View File

@@ -13,7 +13,7 @@ OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
PYTHON = .venv/bin/python
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm|couchbase" | tr '\n' ' ')
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm" | tr '\n' ' ')
PORT ?= 3001

View File

@@ -178,10 +178,3 @@ autosummary_generate = True
html_copy_source = False
html_show_sourcelink = False
# Set canonical URL from the Read the Docs Domain
html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
# Tell Jinja2 templates the build is running on Read the Docs
if os.environ.get("READTHEDOCS", "") == "True":
html_context["READTHEDOCS"] = True

View File

@@ -78,7 +78,7 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
continue
if inspect.isclass(type_):
# The type of the class is used to select a template
# The clasification of the class is used to select a template
# for the object when rendering the documentation.
# See `templates` directory for defined templates.
# This is a hacky solution to distinguish between different

View File

@@ -55,7 +55,6 @@ A developer platform that lets you debug, test, evaluate, and monitor LLM applic
dark: useBaseUrl('/svg/langchain_stack_062024_dark.svg'),
}}
title="LangChain Framework Overview"
style={{ width: "100%" }}
/>
## LangChain Expression Language (LCEL)
@@ -236,7 +235,7 @@ This is where information like log-probs and token usage may be stored.
These represent a decision from a language model to call a tool. They are included as part of an `AIMessage` output.
They can be accessed from there with the `.tool_calls` property.
This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with the following arguments:
This property returns a list of dictionaries. Each dictionary has the following keys:
- `name`: The name of the tool that should be called.
- `args`: The arguments to that tool.
@@ -246,19 +245,14 @@ This property returns a list of `ToolCall`s. A `ToolCall` is a dictionary with t
This represents a system message, which tells the model how to behave. Not every model provider supports this.
#### ToolMessage
This represents the result of a tool call. In addition to `role` and `content`, this message has:
- a `tool_call_id` field which conveys the id of the call to the tool that was called to produce this result.
- an `artifact` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
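
For illustration, here is a minimal sketch of constructing such a message by hand (assuming a recent `langchain-core` in which `ToolMessage` exposes the `artifact` field; all values are hypothetical):

```python
from langchain_core.messages import ToolMessage

# Hypothetical tool result used only for illustration
tool_message = ToolMessage(
    content="CCG revenue was $7.5B",  # what gets sent back to the model
    tool_call_id="call_abc123",       # id of the tool call that produced this result
    name="get_segment_revenue",       # name of the tool that was called
    artifact={"segment": "CCG", "revenue_usd_b": 7.5},  # kept for tracking, not sent to the model
)
```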
#### (Legacy) FunctionMessage
This is a legacy message type, corresponding to OpenAI's legacy function-calling API. ToolMessage should be used instead to correspond to the updated tool-calling API.
#### FunctionMessage
This represents the result of a function call. In addition to `role` and `content`, this message has a `name` parameter which conveys the name of the function that was called to produce this result.
#### ToolMessage
This represents the result of a tool call. This is distinct from a FunctionMessage in order to match OpenAI's `function` and `tool` message types. In addition to `role` and `content`, this message has a `tool_call_id` parameter which conveys the id of the call to the tool that was called to produce this result.
### Prompt templates
<span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
@@ -501,87 +495,35 @@ For specifics on how to use retrievers, see the [relevant how-to guides here](/d
### Tools
<span data-heading-keywords="tool,tools"></span>
Tools are utilities designed to be called by a model: their inputs are designed to be generated by models, and their outputs are designed to be passed back to models.
Tools are needed whenever you want a model to control parts of your code or call out to external APIs.
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
A tool consists of:
A tool consists of the following components:
1. The name of the tool.
2. A description of what the tool does.
3. A JSON schema defining the inputs to the tool.
4. A function (and, optionally, an async variant of the function).
1. The name of the tool
2. A description of what the tool does
3. JSON schema of what the inputs to the tool are
4. The function to call
5. Whether the result of a tool should be returned directly to the user (only relevant for agents)
When a tool is bound to a model, the name, description and JSON schema are provided as context to the model.
Given a list of tools and a set of instructions, a model can request to call one or more tools with specific inputs.
Typical usage may look like the following:
The name, description and JSON schema are provided as context
to the LLM, allowing the LLM to determine how to use the tool
appropriately.
```python
tools = [...] # Define a list of tools
llm_with_tools = llm.bind_tools(tools)
ai_msg = llm_with_tools.invoke("do xyz...") # AIMessage(tool_calls=[ToolCall(...), ...], ...)
```
Given a list of available tools and a prompt, an LLM can request
that one or more tools be invoked with appropriate arguments.
The `AIMessage` returned from the model MAY have `tool_calls` associated with it.
Read [this guide](/docs/concepts/#aimessage) for more information on what the response type may look like.
Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:
Once the chosen tools are invoked, the results can be passed back to the model so that it can complete whatever task
it's performing.
There are generally two different ways to invoke the tool and pass back the response:
- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
- Simpler tools are generally easier for models to use than more complex tools.
#### Invoke with just the arguments
For specifics on how to use tools, see the [relevant how-to guides here](/docs/how_to/#tools).
When you invoke a tool with just the arguments, you will get back the raw tool output (usually a string).
This generally looks like:
```python
# You will want to previously check that the LLM returned tool calls
tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
tool_output = tool.invoke(tool_call["args"])
tool_message = ToolMessage(content=tool_output, tool_call_id=tool_call["id"], name=tool_call["name"])
```
Note that the `content` field will generally be passed back to the model.
If you do not want the raw tool response to be passed to the model, but you still want to keep it around,
you can transform the tool output but also pass it as an artifact (read more about [`ToolMessage.artifact` here](/docs/concepts/#toolmessage))
```python
... # Same code as above
response_for_llm = transform(response)
tool_message = ToolMessage(content=response_for_llm, tool_call_id=tool_call["id"], name=tool_call["name"], artifact=tool_output)
```
#### Invoke with `ToolCall`
The other way to invoke a tool is to call it with the full `ToolCall` that was generated by the model.
When you do this, the tool will return a ToolMessage.
The benefits of this are that you don't have to write the logic yourself to transform the tool output into a ToolMessage.
This generally looks like:
```python
tool_call = ai_msg.tool_calls[0] # ToolCall(args={...}, id=..., ...)
tool_message = tool.invoke(tool_call)
# -> ToolMessage(content="tool result foobar...", tool_call_id=..., name="tool_name")
```
If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the ToolMessage, you will need to have the tool return two things.
Read more about [defining tools that return artifacts here](/docs/how_to/tool_artifacts/).
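
As a minimal sketch (assuming the `response_format="content_and_artifact"` option of the `@tool` decorator; the tool name and data are purely illustrative):

```python
from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def get_segment_revenue(segment: str) -> tuple:
    """Look up quarterly revenue for a business segment."""
    raw = {"segment": segment, "revenue_usd_b": 7.5}  # hypothetical data source
    summary = f"{segment} revenue was ${raw['revenue_usd_b']}B"
    return summary, raw  # (content sent to the model, artifact kept alongside)
```

Invoking this tool with a full `ToolCall` then produces a `ToolMessage` whose `content` is the summary string and whose `artifact` is the raw dictionary.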
#### Best practices
When designing tools to be used by a model, it is important to keep in mind that:
- Chat models that have explicit [tool-calling APIs](/docs/concepts/#functiontool-calling) will be better at tool calling than non-fine-tuned models.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas. This is another form of prompt engineering.
- Simple, narrowly scoped tools are easier for models to use than complex tools.
#### Related
For specifics on how to use tools, see the [tools how-to guides](/docs/how_to/#tools).
To use a pre-built tool, see the [tool integration docs](/docs/integrations/tools/).
To use an existing pre-built tool, see [here](docs/integrations/tools/) for a list of pre-built tools.
### Toolkits
<span data-heading-keywords="toolkit,toolkits"></span>
Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.
@@ -879,7 +821,7 @@ We recommend this method as a starting point when working with structured output
- If multiple underlying techniques are supported, you can supply a `method` parameter to
[toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs).
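
For example, a minimal sketch of toggling the technique for a model that supports several (assuming `langchain-openai`; the schema is illustrative):

```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(..., description="The person's name")
    age: int = Field(..., description="The person's age")


llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
# The default is tool/function calling; "json_mode" switches to OpenAI's JSON mode.
# With "json_mode", the prompt itself must ask the model to respond in JSON.
structured_llm = llm.with_structured_output(Person, method="json_mode")
```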
You may want or need to use other techniques if:
You may want or need to use other techiniques if:
- The chat model you are using does not support tool calling.
- You are working with very complex schemas and the model is having trouble generating outputs that conform.
@@ -1080,7 +1022,7 @@ See our [blog post overview](https://blog.langchain.dev/query-construction/) and
#### Indexing
Fourth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
Fouth, consider the design of your document index. A simple and powerful idea is to **decouple the documents that you index for retrieval from the documents that you pass to the LLM for generation.** Indexing frequently uses embedding models with vector stores, which [compress the semantic information in documents to fixed-size vectors](/docs/concepts/#embedding-models).
Many RAG approaches focus on splitting documents into chunks and retrieving some number based on similarity to an input question for the LLM. But chunk size and chunk number can be difficult to set and affect results if they do not provide full context for the LLM to answer a question. Furthermore, LLMs are increasingly capable of processing millions of tokens.
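
A minimal sketch of this decoupling uses `MultiVectorRetriever` to index short, retrieval-friendly representations (e.g. summaries) while storing and returning the full parent documents (the class names follow imports used elsewhere in these docs; the embedding and vector-store choices are illustrative):

```python
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

# Vector store indexes the small representations used for similarity search
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
# Byte store holds the full parent documents that are actually passed to the LLM
docstore = InMemoryByteStore()

retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=docstore,
    id_key="doc_id",
)
```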
@@ -1188,7 +1130,7 @@ Table columns:
| Token | [many classes](/docs/how_to/split_by_token/) | Tokens | | Splits text on tokens. There exist a few different ways to measure tokens. |
| Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. |
| Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
| Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. |
### Evaluation
<span data-heading-keywords="evaluation,evaluate"></span>

View File

@@ -33,8 +33,6 @@ Some examples include:
- [Build a Simple LLM Application with LCEL](/docs/tutorials/llm_chain/)
- [Build a Retrieval Augmented Generation (RAG) App](/docs/tutorials/rag/)
A good structural rule of thumb is to follow the structure of this [example from Numpy](https://numpy.org/numpy-tutorials/content/tutorial-svd.html).
Here are some high-level tips on writing a good tutorial:

View File

@@ -11,7 +11,7 @@ There are a few different places you can contribute integrations for LangChain:
- **Community**: For lighter-weight integrations that are primarily maintained by LangChain and the Open Source Community.
- **Partner Packages**: For independent packages that are co-maintained by LangChain and a partner.
For the most part, **new integrations should be added to the Community package**. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
For the most part, new integrations should be added to the Community package. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package.
In the following sections, we'll walk through how to contribute to each of these packages from a fake company, `Parrot Link AI`.
@@ -60,10 +60,6 @@ And add documentation to:
## Partner package in LangChain repo
:::caution
Before starting a **partner** package, please confirm your intent with the LangChain team. Partner packages require more maintenance as separate packages, so we will close PRs that add new partner packages without prior discussion. See the above section for how to add a community integration.
:::
Partner packages can be hosted in the `LangChain` monorepo or in an external repo.
Partner package in the `LangChain` repo is placed in `libs/partners/{partner}`

View File

@@ -153,7 +153,7 @@
"\n",
" def parse(self, text: str) -> List[str]:\n",
" lines = text.strip().split(\"\\n\")\n",
" return list(filter(None, lines)) # Remove empty lines\n",
" return lines\n",
"\n",
"\n",
"output_parser = LineListOutputParser()\n",

View File

@@ -1,342 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to dispatch custom callback events\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"- [Astream Events API](/docs/concepts/#astream_events) the `astream_events` method will surface custom callback events.\n",
":::\n",
"\n",
"In some situations, you may want to dipsatch a custom callback event from within a [Runnable](/docs/concepts/#runnable-interface) so it can be surfaced\n",
"in a custom callback handler or via the [Astream Events API](/docs/concepts/#astream_events).\n",
"\n",
"For example, if you have a long running tool with multiple steps, you can dispatch custom events between the steps and use these custom events to monitor progress.\n",
"You could also surface these custom events to an end user of your application to show them how the current task is progressing.\n",
"\n",
"To dispatch a custom event you need to decide on two attributes for the event: the `name` and the `data`.\n",
"\n",
"| Attribute | Type | Description |\n",
"|-----------|------|----------------------------------------------------------------------------------------------------------|\n",
"| name | str | A user defined name for the event. |\n",
"| data | Any | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |\n",
"\n",
"\n",
":::{.callout-important}\n",
"* Dispatching custom callback events requires `langchain-core>=0.2.15`.\n",
"* Custom callback events can only be dispatched from within an existing `Runnable`.\n",
"* If using `astream_events`, you must use `version='v2'` to see custom events.\n",
"* Sending or rendering custom callbacks events in LangSmith is not yet supported.\n",
":::\n",
"\n",
"\n",
":::caution COMPATIBILITY\n",
"LangChain cannot automatically propagate configuration, including callbacks necessary for astream_events(), to child runnables if you are running async code in python<=3.10. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
"\n",
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
"\n",
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in other Python versions.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain-core"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Astream Events API\n",
"\n",
"The most useful way to consume custom events is via the [Astream Events API](/docs/concepts/#astream_events).\n",
"\n",
"We can use the `async` `adispatch_custom_event` API to emit custom events in an async setting. \n",
"\n",
"\n",
":::{.callout-important}\n",
"\n",
"To see custom events via the astream events API, you need to use the newer `v2` API of `astream_events`.\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'foo', 'tags': [], 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'metadata': {}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
"{'event': 'on_chain_stream', 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'f354ffe8-4c22-4881-890a-c1cad038a9a6', 'name': 'foo', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
]
}
],
"source": [
"from langchain_core.callbacks.manager import (\n",
" adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def foo(x: str) -> str:\n",
" await adispatch_custom_event(\"event1\", {\"x\": x})\n",
" await adispatch_custom_event(\"event2\", 5)\n",
" return x\n",
"\n",
"\n",
"async for event in foo.astream_events(\"hello world\", version=\"v2\"):\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In python <= 3.10, you must propagate the config manually!"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'data': {'input': 'hello world'}, 'name': 'bar', 'tags': [], 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'metadata': {}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event1', 'tags': [], 'metadata': {}, 'data': {'x': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_custom_event', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'event2', 'tags': [], 'metadata': {}, 'data': 5, 'parent_ids': []}\n",
"{'event': 'on_chain_stream', 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'data': {'chunk': 'hello world'}, 'parent_ids': []}\n",
"{'event': 'on_chain_end', 'data': {'output': 'hello world'}, 'run_id': 'c787b09d-698a-41b9-8290-92aaa656f3e7', 'name': 'bar', 'tags': [], 'metadata': {}, 'parent_ids': []}\n"
]
}
],
"source": [
"from langchain_core.callbacks.manager import (\n",
" adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def bar(x: str, config: RunnableConfig) -> str:\n",
" \"\"\"An example that shows how to manually propagate config.\n",
"\n",
" You must do this if you're running python<=3.10.\n",
" \"\"\"\n",
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
" return x\n",
"\n",
"\n",
"async for event in bar.astream_events(\"hello world\", version=\"v2\"):\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Async Callback Handler\n",
"\n",
"You can also consume the dispatched event via an async callback handler."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n",
"Received event event2 with data: 5, with tags: ['foo', 'bar'], with metadata: {} and run_id: a62b84be-7afd-4829-9947-7165df1f37d9\n"
]
},
{
"data": {
"text/plain": [
"1"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List, Optional\n",
"from uuid import UUID\n",
"\n",
"from langchain_core.callbacks import AsyncCallbackHandler\n",
"from langchain_core.callbacks.manager import (\n",
" adispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"class AsyncCustomCallbackHandler(AsyncCallbackHandler):\n",
" async def on_custom_event(\n",
" self,\n",
" name: str,\n",
" data: Any,\n",
" *,\n",
" run_id: UUID,\n",
" tags: Optional[List[str]] = None,\n",
" metadata: Optional[Dict[str, Any]] = None,\n",
" **kwargs: Any,\n",
" ) -> None:\n",
" print(\n",
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
" )\n",
"\n",
"\n",
"@RunnableLambda\n",
"async def bar(x: str, config: RunnableConfig) -> str:\n",
" \"\"\"An example that shows how to manually propagate config.\n",
"\n",
" You must do this if you're running python<=3.10.\n",
" \"\"\"\n",
" await adispatch_custom_event(\"event1\", {\"x\": x}, config=config)\n",
" await adispatch_custom_event(\"event2\", 5, config=config)\n",
" return x\n",
"\n",
"\n",
"async_handler = AsyncCustomCallbackHandler()\n",
"await foo.ainvoke(1, {\"callbacks\": [async_handler], \"tags\": [\"foo\", \"bar\"]})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sync Callback Handler\n",
"\n",
"Let's see how to emit custom events in a sync environment using `dispatch_custom_event`.\n",
"\n",
"You **must** call `dispatch_custom_event` from within an existing `Runnable`."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Received event event1 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n",
"Received event event2 with data: {'x': 1}, with tags: ['foo', 'bar'], with metadata: {} and run_id: 27b5ce33-dc26-4b34-92dd-08a89cb22268\n"
]
},
{
"data": {
"text/plain": [
"1"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List, Optional\n",
"from uuid import UUID\n",
"\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.callbacks.manager import (\n",
" dispatch_custom_event,\n",
")\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_core.runnables.config import RunnableConfig\n",
"\n",
"\n",
"class CustomHandler(BaseCallbackHandler):\n",
" def on_custom_event(\n",
" self,\n",
" name: str,\n",
" data: Any,\n",
" *,\n",
" run_id: UUID,\n",
" tags: Optional[List[str]] = None,\n",
" metadata: Optional[Dict[str, Any]] = None,\n",
" **kwargs: Any,\n",
" ) -> None:\n",
" print(\n",
" f\"Received event {name} with data: {data}, with tags: {tags}, with metadata: {metadata} and run_id: {run_id}\"\n",
" )\n",
"\n",
"\n",
"@RunnableLambda\n",
"def foo(x: int, config: RunnableConfig) -> int:\n",
" dispatch_custom_event(\"event1\", {\"x\": x})\n",
" dispatch_custom_event(\"event2\", {\"x\": x})\n",
" return x\n",
"\n",
"\n",
"handler = CustomHandler()\n",
"foo.invoke(1, {\"callbacks\": [handler], \"tags\": [\"foo\", \"bar\"]})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've seen how to emit custom events, you can check out the more in depth guide for [astream events](/docs/how_to/streaming/#using-stream-events) which is the easiest way to leverage custom events."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -15,12 +15,6 @@
"\n",
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
"\n",
":::\n",
"\n",
":::info Requires ``langchain >= 0.2.8``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.8``. Please make sure your package is up to date.\n",
"\n",
":::"
]
},
@@ -31,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
"%pip install -qU langchain langchain-openai langchain-anthropic langchain-google-vertexai"
]
},
{
@@ -82,6 +76,32 @@
"print(\"Gemini 1.5: \" + gemini_15.invoke(\"what's your name\").content + \"\\n\")"
]
},
{
"cell_type": "markdown",
"id": "fff9a4c8-b6ee-4a1a-8d3d-0ecaa312d4ed",
"metadata": {},
"source": [
"## Simple config example"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75c25d39-bf47-4b51-a6c6-64d9c572bfd6",
"metadata": {},
"outputs": [],
"source": [
"user_config = {\n",
" \"model\": \"...user-specified...\",\n",
" \"model_provider\": \"...user-specified...\",\n",
" \"temperature\": 0,\n",
" \"max_tokens\": 1000,\n",
"}\n",
"\n",
"llm = init_chat_model(**user_config)\n",
"llm.invoke(\"what's your name\")"
]
},
{
"cell_type": "markdown",
"id": "f811f219-5e78-4b62-b495-915d52a22532",
@@ -104,216 +124,13 @@
"gemini_15 = init_chat_model(\"gemini-1.5-pro\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "476a44db-c50d-4846-951d-0f1c9ba8bbaa",
"metadata": {},
"source": [
"## Creating a configurable model\n",
"\n",
"You can also create a runtime-configurable model by specifying `configurable_fields`. If you don't specify a `model` value, then \"model\" and \"model_provider\" be configurable by default."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6c037f27-12d7-4e83-811e-4245c0e3ba58",
"execution_count": null,
"id": "da07b5c0-d2e6-42e4-bfcd-2efcfaae6221",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_d576307f90', 'finish_reason': 'stop', 'logprobs': None}, id='run-5428ab5c-b5c0-46de-9946-5d4ca40dbdc8-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"configurable_model = init_chat_model(temperature=0)\n",
"\n",
"configurable_model.invoke(\n",
" \"what's your name\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "321e3036-abd2-4e1f-bcc6-606efd036954",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_012XvotUJ3kGLXJUWKBVxJUi', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-1ad1eefe-f1c6-4244-8bc6-90e2cb7ee554-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"configurable_model.invoke(\n",
" \"what's your name\", config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}}\n",
")"
]
},
{
"cell_type": "markdown",
"id": "7f3b3d4a-4066-45e4-8297-ea81ac8e70b7",
"metadata": {},
"source": [
"### Configurable model with default values\n",
"\n",
"We can create a configurable model with default model values, specify which parameters are configurable, and add prefixes to configurable params:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "814a2289-d0db-401e-b555-d5116112b413",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"I'm an AI language model created by OpenAI, and I don't have a personal name. You can call me Assistant or any other name you prefer! How can I assist you today?\", response_metadata={'token_usage': {'completion_tokens': 37, 'prompt_tokens': 11, 'total_tokens': 48}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_ce0793330f', 'finish_reason': 'stop', 'logprobs': None}, id='run-3923e328-7715-4cd6-b215-98e4b6bf7c9d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 37, 'total_tokens': 48})"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"first_llm = init_chat_model(\n",
" model=\"gpt-4o\",\n",
" temperature=0,\n",
" configurable_fields=(\"model\", \"model_provider\", \"temperature\", \"max_tokens\"),\n",
" config_prefix=\"first\", # useful when you have a chain with multiple models\n",
")\n",
"\n",
"first_llm.invoke(\"what's your name\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "6c8755ba-c001-4f5a-a497-be3f1db83244",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"My name is Claude. It's nice to meet you!\", response_metadata={'id': 'msg_01RyYR64DoMPNCfHeNnroMXm', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 11, 'output_tokens': 15}}, id='run-22446159-3723-43e6-88df-b84797e7751d-0', usage_metadata={'input_tokens': 11, 'output_tokens': 15, 'total_tokens': 26})"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"first_llm.invoke(\n",
" \"what's your name\",\n",
" config={\n",
" \"configurable\": {\n",
" \"first_model\": \"claude-3-5-sonnet-20240620\",\n",
" \"first_temperature\": 0.5,\n",
" \"first_max_tokens\": 100,\n",
" }\n",
" },\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0072b1a3-7e44-4b4e-8b07-efe1ba91a689",
"metadata": {},
"source": [
"### Using a configurable model declaratively\n",
"\n",
"We can call declarative operations like `bind_tools`, `with_structured_output`, `with_configurable`, etc. on a configurable model and chain a configurable model in the same way that we would a regularly instantiated chat model object."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "067dabee-1050-4110-ae24-c48eba01e13b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'call_sYT3PFMufHGWJD32Hi2CTNUP'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York, NY'},\n",
" 'id': 'call_j1qjhxRnD3ffQmRyqjlI1Lnk'}]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
" \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"class GetPopulation(BaseModel):\n",
" \"\"\"Get the current population in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm = init_chat_model(temperature=0)\n",
"llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])\n",
"\n",
"llm_with_tools.invoke(\n",
" \"what's bigger in 2024 LA or NYC\", config={\"configurable\": {\"model\": \"gpt-4o\"}}\n",
").tool_calls"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e57dfe9f-cd24-4e37-9ce9-ccf8daf78f89",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetPopulation',\n",
" 'args': {'location': 'Los Angeles, CA'},\n",
" 'id': 'toolu_01CxEHxKtVbLBrvzFS7GQ5xR'},\n",
" {'name': 'GetPopulation',\n",
" 'args': {'location': 'New York City, NY'},\n",
" 'id': 'toolu_013A79qt5toWSsKunFBDZd5S'}]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_tools.invoke(\n",
" \"what's bigger in 2024 LA or NYC\",\n",
" config={\"configurable\": {\"model\": \"claude-3-5-sonnet-20240620\"}},\n",
").tool_calls"
]
"outputs": [],
"source": []
}
],
"metadata": {
@@ -332,7 +149,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -16,7 +16,7 @@
"\n",
"Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
"\n",
"This guide requires `langchain-openai >= 0.1.9`."
"This guide requires `langchain-openai >= 0.1.8`."
]
},
{
@@ -153,7 +153,7 @@
"\n",
"#### OpenAI\n",
"\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.9` and can be enabled by setting `stream_usage=True`. This attribute can also be set when `ChatOpenAI` is instantiated.\n",
"For example, OpenAI will return a message [chunk](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.ai.AIMessageChunk.html) at the end of a stream with token usage information. This behavior is supported by `langchain-openai >= 0.1.8` and can be enabled by setting `stream_options={\"include_usage\": True}`.\n",
"\n",
"```{=mdx}\n",
":::note\n",
@@ -172,18 +172,18 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='Hello' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='!' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' How' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' can' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' I' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' assist' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' you' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content=' today' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='?' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623'\n",
"content='' id='run-adb20c31-60c7-43a2-99b2-d4a53ca5f623' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='Hello' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='!' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' How' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' can' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' I' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' assist' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' you' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content=' today' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='?' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf'\n",
"content='' id='run-b40e502e-d30e-4617-94ad-95b4dfee14bf' usage_metadata={'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}\n"
]
}
],
@@ -191,7 +191,7 @@
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\")\n",
"\n",
"aggregate = None\n",
"for chunk in llm.stream(\"hello\", stream_usage=True):\n",
"for chunk in llm.stream(\"hello\", stream_options={\"include_usage\": True}):\n",
" print(chunk)\n",
" aggregate = chunk if aggregate is None else aggregate + chunk"
]
@@ -229,7 +229,7 @@
"id": "7dba63e8-0ed7-4533-8f0f-78e19c38a25c",
"metadata": {},
"source": [
"To disable streaming token counts for OpenAI, set `stream_usage` to False, or omit it from the parameters:"
"To disable streaming token counts for OpenAI, set `\"include_usage\"` to False in `stream_options`, or omit it from the parameters:"
]
},
{
@@ -242,17 +242,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='Hello' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='!' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' How' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' can' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' I' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' assist' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' you' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content=' today' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='?' id='run-8e758550-94b0-4cca-a298-57482793c25d'\n",
"content='' response_metadata={'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'} id='run-8e758550-94b0-4cca-a298-57482793c25d'\n"
"content='' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='Hello' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='!' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' How' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' can' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' I' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' assist' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' you' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content=' today' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='?' id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n",
"content='' response_metadata={'finish_reason': 'stop'} id='run-0085d64c-13d2-431b-a0fa-399be8cd3c52'\n"
]
}
],
@@ -267,7 +267,7 @@
"id": "6a5d9617-be3a-419a-9276-de9c29fa50ae",
"metadata": {},
"source": [
"You can also enable streaming token usage by setting `stream_usage` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"You can also enable streaming token usage by setting `model_kwargs` when instantiating the chat model. This can be useful when incorporating chat models into LangChain [chains](/docs/concepts#langchain-expression-language-lcel): usage metadata can be monitored when [streaming intermediate steps](/docs/how_to/streaming#using-stream-events) or using tracing software such as [LangSmith](https://docs.smith.langchain.com/).\n",
"\n",
"See the below example, where we return output structured to a desired schema, but can still observe token usage streamed from intermediate steps."
]
@@ -275,7 +275,7 @@
{
"cell_type": "code",
"execution_count": 8,
"id": "0b1523d8-127e-4314-82fa-bd97aca37f9a",
"id": "57dec1fb-bd9c-4c98-8798-8fbbe67f6b2c",
"metadata": {},
"outputs": [
{
@@ -301,7 +301,7 @@
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" stream_usage=True,\n",
" model_kwargs={\"stream_options\": {\"include_usage\": True}},\n",
")\n",
"# Under the hood, .with_structured_output binds tools to the\n",
"# chat model and appends a parser.\n",
@@ -341,7 +341,7 @@
{
"cell_type": "code",
"execution_count": 9,
"id": "b04a4486-72fd-48ce-8f9e-5d281b441195",
"id": "31667d54",
"metadata": {},
"outputs": [
{
@@ -361,11 +361,7 @@
"\n",
"from langchain_community.callbacks.manager import get_openai_callback\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-3.5-turbo-0125\",\n",
" temperature=0,\n",
" stream_usage=True,\n",
")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"\n",
"with get_openai_callback() as cb:\n",
" result = llm.invoke(\"Tell me a joke\")\n",
@@ -383,14 +379,14 @@
{
"cell_type": "code",
"execution_count": 10,
"id": "05f22a1d-b021-490f-8840-f628a07459f2",
"id": "e09420f4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"54\n"
"55\n"
]
}
],
@@ -401,29 +397,37 @@
" print(cb.total_tokens)"
]
},
{
"cell_type": "markdown",
"id": "9ac51188-c8f4-4230-90fd-3cd78cdd955d",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"Cost information is currently not available in streaming mode. This is because model names are currently not propagated through chunks in streaming mode, and the model name is used to look up the correct pricing. Token counts however are available:\n",
":::\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c00c9158-7bb4-4279-88e6-ea70f46e6ac2",
"id": "b241069a-265d-4497-af34-b0a5f95ae67f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tokens Used: 27\n",
"\tPrompt Tokens: 11\n",
"\tCompletion Tokens: 16\n",
"Successful Requests: 1\n",
"Total Cost (USD): $2.95e-05\n"
"28\n"
]
}
],
"source": [
"with get_openai_callback() as cb:\n",
" for chunk in llm.stream(\"Tell me a joke\"):\n",
" for chunk in llm.stream(\"Tell me a joke\", stream_options={\"include_usage\": True}):\n",
" pass\n",
" print(cb)"
" print(cb.total_tokens)"
]
},
{
@@ -453,7 +457,21 @@
")\n",
"tools = load_tools([\"wikipedia\"])\n",
"agent = create_tool_calling_agent(llm, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, verbose=True, stream_runnable=False\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9c1ae74d-8300-4041-9ff4-66093ee592b1",
"metadata": {},
"source": [
"```{=mdx}\n",
":::note\n",
"We have to set `stream_runnable=False` for cost information, as described above. By default the AgentExecutor will stream the underlying agent so that you can get the most granular results when streaming events via AgentExecutor.stream_events.\n",
":::\n",
"```"
]
},
{
@@ -485,30 +503,36 @@
"\n",
"\n",
"\n",
"Page: Allen's hummingbird\n",
"Summary: Allen's hummingbird (Selasphorus sasin) is a species of hummingbird that breeds in the western United States. It is one of seven species in the genus Selasphorus.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Page: Anna's hummingbird\n",
"Summary: Anna's hummingbird (Calypte anna) is a North American species of hummingbird. It was named after Anna Masséna, Duchess of Rivoli.\n",
"It is native to western coastal regions of North America. In the early 20th century, Anna's hummingbirds bred only in northern Baja California and Southern California. The transplanting of exotic ornamental plants in residential areas throughout the Pacific coast and inland deserts provided expanded nectar and nesting sites, allowing the species to expand its breeding range. Year-round residence of Anna's hummingbirds in the Pacific Northwest is an example of ecological release dependent on acclimation to colder winter temperatures, introduced plants, and human provision of nectar feeders during winter.\n",
"These birds feed on nectar from flowers using a long extendable tongue. They also consume small insects and other arthropods caught in flight or gleaned from vegetation.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `wikipedia` with `{'query': 'fastest bird species'}`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of birds by flight speed\n",
"Summary: This is a list of the fastest flying birds in the world. A bird's velocity is necessarily variable; a hunting bird will reach much greater speeds while diving to catch prey than when flying horizontally. The bird that can achieve the greatest airspeed is the peregrine falcon (Falco peregrinus), able to exceed 320 km/h (200 mph) in its dives. A close relative of the common swift, the white-throated needletail (Hirundapus caudacutus), is commonly reported as the fastest bird in level flight with a reported top speed of 169 km/h (105 mph). This record remains unconfirmed as the measurement methods have never been published or verified. The record for the fastest confirmed level flight by a bird is 111.5 km/h (69.3 mph) held by the common swift.\n",
"\n",
"\n",
"\n",
"Page: Fastest animals\n",
"Summary: This is a list of the fastest animals in the world, by types of animal.\n",
"\n",
"\n",
"\n",
"Page: Falcon\n",
"Summary: Falcons () are birds of prey in the genus Falco, which includes about 40 species. Falcons are widely distributed on all continents of the world except Antarctica, though closely related raptors did occur there in the Eocene.\n",
"Adult falcons have thin, tapered wings, which enable them to fly at high speed and change direction rapidly. Fledgling falcons, in their first year of flying, have longer flight feathers, which make their configuration more like that of a general-purpose bird such as a broad wing. This makes flying easier while learning the exceptional skills required to be effective hunters as adults.\n",
"The falcons are the largest genus in the Falconinae subfamily of Falconidae, which itself also includes another subfamily comprising caracaras and a few other species. All these birds kill with their beaks, using a tomial \"tooth\" on the side of their beaks—unlike the hawks, eagles, and other birds of prey in the Accipitridae, which use their feet.\n",
"The largest falcon is the gyrfalcon at up to 65 cm in length. The smallest falcon species is the pygmy falcon, which measures just 20 cm. As with hawks and owls, falcons exhibit sexual dimorphism, with the females typically larger than the males, thus allowing a wider range of prey species.\n",
"Some small falcons with long, narrow wings are called \"hobbies\" and some which hover while hunting are called \"kestrels\".\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species in level flight is the common swift, which holds the record for the fastest confirmed level flight by a bird at 111.5 km/h (69.3 mph). The peregrine falcon is known to exceed speeds of 320 km/h (200 mph) in its dives, making it the fastest bird in terms of diving speed.\u001b[0m\n",
"As is the case with many birds of prey, falcons have exceptional powers of vision; the visual acuity of one species has been measured at 2.6 times that of a normal human. Peregrine falcons have been recorded diving at speeds of 320 km/h (200 mph), making them the fastest-moving creatures on Earth; the fastest recorded dive attained a vertical speed of 390 km/h (240 mph).\u001b[0m\u001b[32;1m\u001b[1;3mThe scientific name for a hummingbird is Trochilidae. The fastest bird species is the peregrine falcon (Falco peregrinus), which can exceed speeds of 320 km/h (200 mph) in its dives.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Total Tokens: 1675\n",
"Prompt Tokens: 1538\n",
"Completion Tokens: 137\n",
"Total Cost (USD): $0.0009745000000000001\n"
"Total Tokens: 1787\n",
"Prompt Tokens: 1687\n",
"Completion Tokens: 100\n",
"Total Cost (USD): $0.0009935\n"
]
}
],

View File

@@ -300,7 +300,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 8,
"id": "ac9295d3",
"metadata": {},
"outputs": [],
@@ -312,8 +312,10 @@
"\n",
"## Quick Install\n",
"\n",
"```bash\n",
"# Hopefully this code block isn't split\n",
"pip install langchain\n",
"```\n",
"\n",
"As an open-source project in a rapidly developing field, we are extremely open to contributions.\n",
"\"\"\""
@@ -321,7 +323,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 9,
"id": "3a0cb17a",
"metadata": {},
"outputs": [
@@ -330,14 +332,15 @@
"text/plain": [
"[Document(page_content='# 🦜️🔗 LangChain'),\n",
" Document(page_content='⚡ Building applications with LLMs through composability ⚡'),\n",
" Document(page_content='## Quick Install'),\n",
" Document(page_content='## Quick Install\\n\\n```bash'),\n",
" Document(page_content=\"# Hopefully this code block isn't split\"),\n",
" Document(page_content='pip install langchain'),\n",
" Document(page_content='```'),\n",
" Document(page_content='As an open-source project in a rapidly developing field, we'),\n",
" Document(page_content='are extremely open to contributions.')]"
]
},
"execution_count": 3,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
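The hunk above shows only the splitter's output. As a rough, assumed sketch (the splitter class and `chunk_size` are guesses, not taken from the notebook), chunks like these can be produced with a markdown-aware splitter and a deliberately small chunk size:

```python
# Assumed sketch (not the notebook's own cell): chunks like the ones shown above
# can be produced with a markdown-aware splitter and a small chunk_size.
from langchain_text_splitters import Language, RecursiveCharacterTextSplitter

fence = "`" * 3  # avoids a literal nested code fence inside this example
markdown_text = (
    "# 🦜️🔗 LangChain\n\n"
    "⚡ Building applications with LLMs through composability ⚡\n\n"
    "## Quick Install\n\n"
    f"{fence}bash\n"
    "# Hopefully this code block isn't split\n"
    "pip install langchain\n"
    f"{fence}\n\n"
    "As an open-source project in a rapidly developing field, "
    "we are extremely open to contributions."
)

# chunk_size is deliberately small so the bash block is split across Documents.
md_splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0
)
for doc in md_splitter.create_documents([markdown_text]):
    print(repr(doc.page_content))
```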
@@ -739,7 +742,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.4"
}
},
"nbformat": 4,

View File

@@ -48,10 +48,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "40ed76a2",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai\n",
"\n",

View File

@@ -220,57 +220,6 @@
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "markdown",
"id": "14002ec8-7ee5-4f91-9315-dd21c3808776",
"metadata": {},
"source": [
"### `LLMListwiseRerank`\n",
"\n",
"[LLMListwiseRerank](https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.document_compressors.listwise_rerank.LLMListwiseRerank.html) uses [zero-shot listwise document reranking](https://arxiv.org/pdf/2305.02156) and functions similarly to `LLMChainFilter` as a robust but more expensive option. It is recommended to use a more powerful LLM.\n",
"\n",
"Note that `LLMListwiseRerank` requires a model with the [with_structured_output](/docs/integrations/chat/) method implemented."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "4ab9ee9f-917e-4d6f-9344-eb7f01533228",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1:\n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while youre at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, Id like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence.\n"
]
}
],
"source": [
"from langchain.retrievers.document_compressors import LLMListwiseRerank\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)\n",
"\n",
"_filter = LLMListwiseRerank.from_llm(llm, top_n=1)\n",
"compression_retriever = ContextualCompressionRetriever(\n",
" base_compressor=_filter, base_retriever=retriever\n",
")\n",
"\n",
"compressed_docs = compression_retriever.invoke(\n",
" \"What did the president say about Ketanji Jackson Brown\"\n",
")\n",
"pretty_print_docs(compressed_docs)"
]
},
{
"cell_type": "markdown",
"id": "7194da42",
@@ -346,7 +295,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "617a1756",
"metadata": {},
"outputs": [],

View File

@@ -1,549 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9a8bceb3-95bd-4496-bb9e-57655136e070",
"metadata": {},
"source": [
"# How to convert Runnables as Tools\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Runnables](/docs/concepts#runnable-interface)\n",
"- [Tools](/docs/concepts#tools)\n",
"- [Agents](/docs/tutorials/agents)\n",
"\n",
":::\n",
"\n",
"Here we will demonstrate how to convert a LangChain `Runnable` into a tool that can be used by agents, chains, or chat models.\n",
"\n",
"## Dependencies\n",
"\n",
"**Note**: this guide requires `langchain-core` >= 0.2.13. We will also use [OpenAI](/docs/integrations/platforms/openai/) for embeddings, but any LangChain embeddings should suffice. We will use a simple [LangGraph](https://langchain-ai.github.io/langgraph/) agent for demonstration purposes."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "92341f48-2c29-4ce9-8ab8-0a7c7a7c98a1",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install -U langchain-core langchain-openai langgraph"
]
},
{
"cell_type": "markdown",
"id": "2b0dcc1a-48e8-4a81-b920-3563192ce076",
"metadata": {},
"source": [
"LangChain [tools](/docs/concepts#tools) are interfaces that an agent, chain, or chat model can use to interact with the world. See [here](/docs/how_to/#tools) for how-to guides covering tool-calling, built-in tools, custom tools, and more information.\n",
"\n",
"LangChain tools-- instances of [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html)-- are [Runnables](/docs/concepts/#runnable-interface) with additional constraints that enable them to be invoked effectively by language models:\n",
"\n",
"- Their inputs are constrained to be serializable, specifically strings and Python `dict` objects;\n",
"- They contain names and descriptions indicating how and when they should be used;\n",
"- They may contain a detailed [args_schema](https://python.langchain.com/v0.2/docs/how_to/custom_tools/) for their arguments. That is, while a tool (as a `Runnable`) might accept a single `dict` input, the specific keys and type information needed to populate a dict should be specified in the `args_schema`.\n",
"\n",
"Runnables that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments."
]
},
{
"cell_type": "markdown",
"id": "b4d76680-1b6b-4862-8c4f-22766a1d41f2",
"metadata": {},
"source": [
"## Basic usage\n",
"\n",
"With typed `dict` input:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b2cc4231-64a3-4733-a284-932dcbf2fcc3",
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain_core.runnables import RunnableLambda\n",
"from typing_extensions import TypedDict\n",
"\n",
"\n",
"class Args(TypedDict):\n",
" a: int\n",
" b: List[int]\n",
"\n",
"\n",
"def f(x: Args) -> str:\n",
" return str(x[\"a\"] * max(x[\"b\"]))\n",
"\n",
"\n",
"runnable = RunnableLambda(f)\n",
"as_tool = runnable.as_tool(\n",
" name=\"My tool\",\n",
" description=\"Explanation of when to use tool.\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "57f2d435-624d-459a-903d-8509fbbde610",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Explanation of when to use tool.\n"
]
},
{
"data": {
"text/plain": [
"{'title': 'My tool',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A', 'type': 'integer'},\n",
" 'b': {'title': 'B', 'type': 'array', 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"print(as_tool.description)\n",
"\n",
"as_tool.args_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "54ae7384-a03d-4fa4-8cdf-9604a4bc39ee",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'6'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"as_tool.invoke({\"a\": 3, \"b\": [1, 2]})"
]
},
{
"cell_type": "markdown",
"id": "9038f587-4613-4f50-b349-135f9e7e3b15",
"metadata": {},
"source": [
"Without typing information, arg types can be specified via `arg_types`:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "169f733c-4936-497f-8577-ee769dc16b88",
"metadata": {},
"outputs": [],
"source": [
"from typing import Any, Dict\n",
"\n",
"\n",
"def g(x: Dict[str, Any]) -> str:\n",
" return str(x[\"a\"] * max(x[\"b\"]))\n",
"\n",
"\n",
"runnable = RunnableLambda(g)\n",
"as_tool = runnable.as_tool(\n",
" name=\"My tool\",\n",
" description=\"Explanation of when to use tool.\",\n",
" arg_types={\"a\": int, \"b\": List[int]},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "32b1a992-8997-4c98-8eb2-c9fe9431b799",
"metadata": {},
"source": [
"Alternatively, the schema can be fully specified by directly passing the desired [args_schema](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool.args_schema) for the tool:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "eb102705-89b7-48dc-9158-d36d5f98ae8e",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GSchema(BaseModel):\n",
" \"\"\"Apply a function to an integer and list of integers.\"\"\"\n",
"\n",
" a: int = Field(..., description=\"Integer\")\n",
" b: List[int] = Field(..., description=\"List of ints\")\n",
"\n",
"\n",
"runnable = RunnableLambda(g)\n",
"as_tool = runnable.as_tool(GSchema)"
]
},
{
"cell_type": "markdown",
"id": "7c474d85-4e01-4fae-9bba-0c6c8c26475c",
"metadata": {},
"source": [
"String input is also supported:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c475282a-58d6-4c2b-af7d-99b73b7d8a13",
"metadata": {},
"outputs": [],
"source": [
"def f(x: str) -> str:\n",
" return x + \"a\"\n",
"\n",
"\n",
"def g(x: str) -> str:\n",
" return x + \"z\"\n",
"\n",
"\n",
"runnable = RunnableLambda(f) | g\n",
"as_tool = runnable.as_tool()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "ad6d8d96-3a87-40bd-a2ac-44a8acde0a8e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'baz'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"as_tool.invoke(\"b\")"
]
},
{
"cell_type": "markdown",
"id": "89fdb3a7-d228-48f0-8f73-262af4febb58",
"metadata": {},
"source": [
"## In agents\n",
"\n",
"Below we will incorporate LangChain Runnables as tools in an [agent](/docs/concepts/#agents) application. We will demonstrate with:\n",
"\n",
"- a document [retriever](/docs/concepts/#retrievers);\n",
"- a simple [RAG](/docs/tutorials/rag/) chain, allowing an agent to delegate relevant queries to it.\n",
"\n",
"We first instantiate a chat model that supports [tool calling](/docs/how_to/tool_calling/):\n",
"\n",
"```{=mdx}\n",
"<ChatModelTabs\n",
" customVarName=\"llm\"\n",
"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "d06c9f2a-4475-450f-9106-54db1d99623b",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "e8a2038a-d762-4196-b5e3-fdb89c11e71d",
"metadata": {},
"source": [
"Following the [RAG tutorial](/docs/tutorials/rag/), let's first construct a retriever:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "23d2a47e-6712-4294-81c8-2c1d76b4bb81",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"documents = [\n",
" Document(\n",
" page_content=\"Dogs are great companions, known for their loyalty and friendliness.\",\n",
" ),\n",
" Document(\n",
" page_content=\"Cats are independent pets that often enjoy their own space.\",\n",
" ),\n",
"]\n",
"\n",
"vectorstore = InMemoryVectorStore.from_documents(\n",
" documents, embedding=OpenAIEmbeddings()\n",
")\n",
"\n",
"retriever = vectorstore.as_retriever(\n",
" search_type=\"similarity\",\n",
" search_kwargs={\"k\": 1},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9ba737ac-43a2-4a6f-b855-5bd0305017f1",
"metadata": {},
"source": [
"We next create use a simple pre-built [LangGraph agent](https://python.langchain.com/v0.2/docs/tutorials/agents/) and provide it the tool:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c939cf2a-60e9-4afd-8b47-84d76ccb13f5",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"tools = [\n",
" retriever.as_tool(\n",
" name=\"pet_info_retriever\",\n",
" description=\"Get information about pets.\",\n",
" )\n",
"]\n",
"agent = create_react_agent(llm, tools)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "be29437b-a187-4a0a-9a5d-419c56f2434e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD', 'function': {'arguments': '{\"__arg1\":\"dogs\"}', 'name': 'pet_info_retriever'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 19, 'prompt_tokens': 60, 'total_tokens': 79}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-d7f81de9-1fb7-4caf-81ed-16dcdb0b2ab4-0', tool_calls=[{'name': 'pet_info_retriever', 'args': {'__arg1': 'dogs'}, 'id': 'call_W8cnfOjwqEn4cFcg19LN9mYD'}], usage_metadata={'input_tokens': 60, 'output_tokens': 19, 'total_tokens': 79})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content=\"[Document(id='86f835fe-4bbe-4ec6-aeb4-489a8b541707', page_content='Dogs are great companions, known for their loyalty and friendliness.')]\", name='pet_info_retriever', tool_call_id='call_W8cnfOjwqEn4cFcg19LN9mYD')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='Dogs are known for being great companions, known for their loyalty and friendliness.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 134, 'total_tokens': 152}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9ca5847a-a5eb-44c0-a774-84cc2c5bbc5b-0', usage_metadata={'input_tokens': 134, 'output_tokens': 18, 'total_tokens': 152})]}}\n",
"----\n"
]
}
],
"source": [
"for chunk in agent.stream({\"messages\": [(\"human\", \"What are dogs known for?\")]}):\n",
" print(chunk)\n",
" print(\"----\")"
]
},
{
"cell_type": "markdown",
"id": "96f2ac9c-36f4-4b7a-ae33-f517734c86aa",
"metadata": {},
"source": [
"See [LangSmith trace](https://smith.langchain.com/public/44e438e3-2faf-45bd-b397-5510fc145eb9/r) for the above run."
]
},
{
"cell_type": "markdown",
"id": "a722fd8a-b957-4ba7-b408-35596b76835f",
"metadata": {},
"source": [
"Going further, we can create a simple [RAG](/docs/tutorials/rag/) chain that takes an additional parameter-- here, the \"style\" of the answer."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "bea518c9-c711-47c2-b8cc-dbd102f71f09",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"system_prompt = \"\"\"\n",
"You are an assistant for question-answering tasks.\n",
"Use the below context to answer the question. If\n",
"you don't know the answer, say you don't know.\n",
"Use three sentences maximum and keep the answer\n",
"concise.\n",
"\n",
"Answer in the style of {answer_style}.\n",
"\n",
"Question: {question}\n",
"\n",
"Context: {context}\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system_prompt)])\n",
"\n",
"rag_chain = (\n",
" {\n",
" \"context\": itemgetter(\"question\") | retriever,\n",
" \"question\": itemgetter(\"question\"),\n",
" \"answer_style\": itemgetter(\"answer_style\"),\n",
" }\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"id": "955a23db-5218-4c34-8486-450a2ddb3443",
"metadata": {},
"source": [
"Note that the input schema for our chain contains the required arguments, so it converts to a tool without further specification:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "2c9f6e61-80ed-4abb-8e77-84de3ccbc891",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'RunnableParallel<context,question,answer_style>Input',\n",
" 'type': 'object',\n",
" 'properties': {'question': {'title': 'Question'},\n",
" 'answer_style': {'title': 'Answer Style'}}}"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rag_chain.input_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "a3f9cf5b-8c71-4b0f-902b-f92e028780c9",
"metadata": {},
"outputs": [],
"source": [
"rag_tool = rag_chain.as_tool(\n",
" name=\"pet_expert\",\n",
" description=\"Get information about pets.\",\n",
")"
]
},
{
"cell_type": "markdown",
"id": "4570615b-8f96-4d97-ae01-1c08b14be584",
"metadata": {},
"source": [
"Below we again invoke the agent. Note that the agent populates the required parameters in its `tool_calls`:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "06409913-a2ad-400f-a202-7b8dd2ef483a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_17iLPWvOD23zqwd1QVQ00Y63', 'function': {'arguments': '{\"question\":\"What are dogs known for according to pirates?\",\"answer_style\":\"quote\"}', 'name': 'pet_expert'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 28, 'prompt_tokens': 59, 'total_tokens': 87}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7fef44f3-7bba-4e63-8c51-2ad9c5e65e2e-0', tool_calls=[{'name': 'pet_expert', 'args': {'question': 'What are dogs known for according to pirates?', 'answer_style': 'quote'}, 'id': 'call_17iLPWvOD23zqwd1QVQ00Y63'}], usage_metadata={'input_tokens': 59, 'output_tokens': 28, 'total_tokens': 87})]}}\n",
"----\n",
"{'tools': {'messages': [ToolMessage(content='\"Dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.\"', name='pet_expert', tool_call_id='call_17iLPWvOD23zqwd1QVQ00Y63')]}}\n",
"----\n",
"{'agent': {'messages': [AIMessage(content='According to pirates, dogs are known for their loyalty and friendliness, making them great companions for pirates on long sea voyages.', response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 119, 'total_tokens': 146}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a30edc3-7be0-4743-b980-ca2f8cad9b8d-0', usage_metadata={'input_tokens': 119, 'output_tokens': 27, 'total_tokens': 146})]}}\n",
"----\n"
]
}
],
"source": [
"agent = create_react_agent(llm, [rag_tool])\n",
"\n",
"for chunk in agent.stream(\n",
" {\"messages\": [(\"human\", \"What would a pirate say dogs are known for?\")]}\n",
"):\n",
" print(chunk)\n",
" print(\"----\")"
]
},
{
"cell_type": "markdown",
"id": "96cc9bc3-e79e-49a8-9915-428ea225358b",
"metadata": {},
"source": [
"See [LangSmith trace](https://smith.langchain.com/public/147ae4e6-4dfb-4dd9-8ca0-5c5b954f08ac/r) for the above run."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -131,7 +131,7 @@
"source": [
"## Base Chat Model\n",
"\n",
"Let's implement a chat model that echoes back the first `n` characters of the last message in the prompt!\n",
"Let's implement a chat model that echoes back the first `n` characetrs of the last message in the prompt!\n",
"\n",
"To do so, we will inherit from `BaseChatModel` and we'll need to implement the following:\n",
"\n",

View File

@@ -5,7 +5,7 @@
"id": "5436020b",
"metadata": {},
"source": [
"# How to create tools\n",
"# How to create custom tools\n",
"\n",
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
"\n",
@@ -16,15 +16,13 @@
"| args_schema | Pydantic BaseModel | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
"\n",
"LangChain supports the creation of tools from:\n",
"LangChain provides 3 ways to create tools:\n",
"\n",
"1. Functions;\n",
"2. LangChain [Runnables](/docs/concepts#runnable-interface);\n",
"1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n",
"2. Using [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n",
"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
"\n",
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method.\n",
"\n",
"In this guide we provide an overview of these methods.\n",
"The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n",
"\n",
":::{.callout-tip}\n",
"\n",
@@ -37,9 +35,7 @@
"id": "c7326b23",
"metadata": {},
"source": [
"## Creating tools from functions\n",
"\n",
"### @tool decorator\n",
"## @tool decorator\n",
"\n",
"This `@tool` decorator is the simplest way to define a custom tool. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description - so a docstring MUST be provided. "
]
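The cell that defines this example tool sits outside the hunk; a minimal sketch consistent with the printed output below (assumed, not the notebook's exact cell) looks like:

```python
# Assumed sketch of the defining cell (it sits outside this hunk); the decorator
# takes the tool name from the function and the description from the docstring.
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


print(multiply.name)         # -> multiply
print(multiply.description)  # docstring-derived description
print(multiply.args)         # -> JSON-schema-like dict for 'a' and 'b'
```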
@@ -55,7 +51,7 @@
"output_type": "stream",
"text": [
"multiply\n",
"Multiply two numbers.\n",
"multiply(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n"
]
}
@@ -100,57 +96,6 @@
" return a * b"
]
},
{
"cell_type": "markdown",
"id": "8f0edc51-c586-414c-8941-c8abe779943f",
"metadata": {},
"source": [
"Note that `@tool` supports parsing of annotations, nested schemas, and other features:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "5626423f-053e-4a66-adca-1d794d835397",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'multiply_by_maxSchema',\n",
" 'description': 'Multiply a by the maximum of b.',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A',\n",
" 'description': 'scale factor',\n",
" 'type': 'string'},\n",
" 'b': {'title': 'B',\n",
" 'description': 'list of ints over which to take maximum',\n",
" 'type': 'array',\n",
" 'items': {'type': 'integer'}}},\n",
" 'required': ['a', 'b']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Annotated, List\n",
"\n",
"\n",
"@tool\n",
"def multiply_by_max(\n",
" a: Annotated[str, \"scale factor\"],\n",
" b: Annotated[List[int], \"list of ints over which to take maximum\"],\n",
") -> int:\n",
" \"\"\"Multiply a by the maximum of b.\"\"\"\n",
" return a * max(b)\n",
"\n",
"\n",
"multiply_by_max.args_schema.schema()"
]
},
{
"cell_type": "markdown",
"id": "98d6eee9",
@@ -161,7 +106,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
"metadata": {},
"outputs": [
@@ -170,7 +115,7 @@
"output_type": "stream",
"text": [
"multiplication-tool\n",
"Multiply two numbers.\n",
"multiplication-tool(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"True\n"
]
@@ -198,84 +143,19 @@
"print(multiply.return_direct)"
]
},
{
"cell_type": "markdown",
"id": "33a9e94d-0b60-48f3-a4c2-247dce096e66",
"metadata": {},
"source": [
"#### Docstring parsing"
]
},
{
"cell_type": "markdown",
"id": "6d0cb586-93d4-4ff1-9779-71df7853cb68",
"metadata": {},
"source": [
"`@tool` can optionally parse [Google Style docstrings](https://google.github.io/styleguide/pyguide.html#383-functions-and-methods) and associate the docstring components (such as arg descriptions) to the relevant parts of the tool schema. To toggle this behavior, specify `parse_docstring`:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "336f5538-956e-47d5-9bde-b732559f9e61",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'fooSchema',\n",
" 'description': 'The foo.',\n",
" 'type': 'object',\n",
" 'properties': {'bar': {'title': 'Bar',\n",
" 'description': 'The bar.',\n",
" 'type': 'string'},\n",
" 'baz': {'title': 'Baz', 'description': 'The baz.', 'type': 'integer'}},\n",
" 'required': ['bar', 'baz']}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"@tool(parse_docstring=True)\n",
"def foo(bar: str, baz: int) -> str:\n",
" \"\"\"The foo.\n",
"\n",
" Args:\n",
" bar: The bar.\n",
" baz: The baz.\n",
" \"\"\"\n",
" return bar\n",
"\n",
"\n",
"foo.args_schema.schema()"
]
},
{
"cell_type": "markdown",
"id": "f18a2503-5393-421b-99fa-4a01dd824d0e",
"metadata": {},
"source": [
":::{.callout-caution}\n",
"By default, `@tool(parse_docstring=True)` will raise `ValueError` if the docstring does not parse correctly. See [API Reference](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html) for detail and examples.\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "b63fcc3b",
"metadata": {},
"source": [
"### StructuredTool\n",
"## StructuredTool\n",
"\n",
"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 4,
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
"metadata": {},
"outputs": [
@@ -318,7 +198,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 5,
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
"metadata": {},
"outputs": [
@@ -328,7 +208,7 @@
"text": [
"6\n",
"Calculator\n",
"multiply numbers\n",
"Calculator(a: int, b: int) -> int - multiply numbers\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
]
}
@@ -359,63 +239,6 @@
"print(calculator.args)"
]
},
{
"cell_type": "markdown",
"id": "5517995d-54e3-449b-8fdb-03561f5e4647",
"metadata": {},
"source": [
"## Creating tools from Runnables\n",
"\n",
"LangChain [Runnables](/docs/concepts#runnable-interface) that accept string or `dict` input can be converted to tools using the [as_tool](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.as_tool) method, which allows for the specification of names, descriptions, and additional schema information for arguments.\n",
"\n",
"Example usage:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "8ef593c5-cf72-4c10-bfc9-7d21874a0c24",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'answer_style': {'title': 'Answer Style', 'type': 'string'}}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.language_models import GenericFakeChatModel\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"Hello. Please respond in the style of {answer_style}.\")]\n",
")\n",
"\n",
"# Placeholder LLM\n",
"llm = GenericFakeChatModel(messages=iter([\"hello matey\"]))\n",
"\n",
"chain = prompt | llm | StrOutputParser()\n",
"\n",
"as_tool = chain.as_tool(\n",
" name=\"Style responder\", description=\"Description of when to use tool.\"\n",
")\n",
"as_tool.args"
]
},
{
"cell_type": "markdown",
"id": "0521b787-a146-45a6-8ace-ae1ac4669dd7",
"metadata": {},
"source": [
"See [this guide](/docs/how_to/convert_runnable_to_tool) for more detail."
]
},
{
"cell_type": "markdown",
"id": "b840074b-9c10-4ca0-aed8-626c52b2398f",
@@ -428,7 +251,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 16,
"id": "1dad8f8e",
"metadata": {},
"outputs": [],
@@ -477,7 +300,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 7,
"id": "bb551c33",
"metadata": {},
"outputs": [
@@ -528,7 +351,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 8,
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
"metadata": {},
"outputs": [
@@ -560,7 +383,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 9,
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
"metadata": {},
"outputs": [
@@ -605,7 +428,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 10,
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
"metadata": {},
"outputs": [
@@ -650,7 +473,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 11,
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
"metadata": {},
"outputs": [],
@@ -673,7 +496,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 12,
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
"metadata": {},
"outputs": [
@@ -683,7 +506,7 @@
"'Error: There is no city by the name of foobar.'"
]
},
"execution_count": 16,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -707,7 +530,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 13,
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
"metadata": {},
"outputs": [
@@ -717,7 +540,7 @@
"\"There is no such city, but it's probably above 0K there!\""
]
},
"execution_count": 17,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -741,7 +564,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 14,
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
"metadata": {},
"outputs": [
@@ -751,7 +574,7 @@
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
]
},
"execution_count": 18,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -768,189 +591,13 @@
"\n",
"get_weather_tool.invoke({\"city\": \"foobar\"})"
]
},
{
"cell_type": "markdown",
"id": "1a8d8383-11b3-445e-956f-df4e96995e00",
"metadata": {},
"source": [
"## Returning artifacts of Tool execution\n",
"\n",
"Sometimes there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns custom objects like Documents, we may want to pass some view or metadata about this output to the model without passing the raw output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
"\n",
"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
"\n",
":::info Requires ``langchain-core >= 0.2.19``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
"\n",
":::\n",
"\n",
"If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format=\"content_and_artifact\"` when defining our tool and make sure that we return a tuple of (content, artifact):"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "14905425-0334-43a0-9de9-5bcf622ede0e",
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"from typing import List, Tuple\n",
"\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool(response_format=\"content_and_artifact\")\n",
"def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:\n",
" \"\"\"Generate size random ints in the range [min, max].\"\"\"\n",
" array = [random.randint(min, max) for _ in range(size)]\n",
" content = f\"Successfully generated array of {size} random ints in [{min}, {max}].\"\n",
" return content, array"
]
},
{
"cell_type": "markdown",
"id": "49f057a6-8938-43ea-8faf-ae41e797ceb8",
"metadata": {},
"source": [
"If we invoke our tool directly with the tool arguments, we'll get back just the content part of the output:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "0f2e1528-404b-46e6-b87c-f0957c4b9217",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Successfully generated array of 10 random ints in [0, 9].'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_random_ints.invoke({\"min\": 0, \"max\": 9, \"size\": 10})"
]
},
{
"cell_type": "markdown",
"id": "1e62ebba-1737-4b97-b61a-7313ade4e8c2",
"metadata": {},
"source": [
"If we invoke our tool with a ToolCall (like the ones generated by tool-calling models), we'll get back a ToolMessage that contains both the content and artifact generated by the Tool:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cc197777-26eb-46b3-a83b-c2ce116c6311",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[1, 4, 2, 5, 3, 9, 0, 4, 7, 7])"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_random_ints.invoke(\n",
" {\n",
" \"name\": \"generate_random_ints\",\n",
" \"args\": {\"min\": 0, \"max\": 9, \"size\": 10},\n",
" \"id\": \"123\", # required\n",
" \"type\": \"tool_call\", # required\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "dfdc1040-bf25-4790-b4c3-59452db84e11",
"metadata": {},
"source": [
"We can do the same when subclassing BaseTool:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fe1a09d1-378b-4b91-bb5e-0697c3d7eb92",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class GenerateRandomFloats(BaseTool):\n",
" name: str = \"generate_random_floats\"\n",
" description: str = \"Generate size random floats in the range [min, max].\"\n",
" response_format: str = \"content_and_artifact\"\n",
"\n",
" ndigits: int = 2\n",
"\n",
" def _run(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
" range_ = max - min\n",
" array = [\n",
" round(min + (range_ * random.random()), ndigits=self.ndigits)\n",
" for _ in range(size)\n",
" ]\n",
" content = f\"Generated {size} floats in [{min}, {max}], rounded to {self.ndigits} decimals.\"\n",
" return content, array\n",
"\n",
" # Optionally define an equivalent async method\n",
"\n",
" # async def _arun(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
" # ..."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "8c3d16f6-1c4a-48ab-b05a-38547c592e79",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.4277, 0.7578, 2.4871])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rand_gen = GenerateRandomFloats(ndigits=4)\n",
"\n",
"rand_gen.invoke(\n",
" {\n",
" \"name\": \"generate_random_floats\",\n",
" \"args\": {\"min\": 0.1, \"max\": 3.3333, \"size\": 3},\n",
" \"id\": \"123\",\n",
" \"type\": \"tool_call\",\n",
" }\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "poetry-venv-311"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -962,7 +609,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
},
"vscode": {
"interpreter": {

View File

@@ -58,8 +58,6 @@
}
],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
@@ -88,7 +86,7 @@
" # NOTE: This is returning another Runnable, not an actual output.\n",
" return contextualize_question\n",
" else:\n",
" return RunnablePassthrough() | itemgetter(\"question\")\n",
" return RunnablePassthrough()\n",
"\n",
"\n",
"@chain\n",

View File

@@ -67,16 +67,15 @@ If you'd prefer not to set an environment variable you can pass the key in direc
```python
from langchain_cohere import CohereEmbeddings
embeddings_model = CohereEmbeddings(cohere_api_key="...", model='embed-english-v3.0')
embeddings_model = CohereEmbeddings(cohere_api_key="...")
```
Otherwise you can initialize simply as shown below:
Otherwise you can initialize without any params:
```python
from langchain_cohere import CohereEmbeddings
embeddings_model = CohereEmbeddings(model='embed-english-v3.0')
embeddings_model = CohereEmbeddings()
```
Do note that it is mandatory to pass the model parameter while initializing the CohereEmbeddings class.
</TabItem>
<TabItem value="huggingface" label="Hugging Face">

View File

@@ -9,13 +9,11 @@
"source": [
"# Hybrid Search\n",
"\n",
"The standard search in LangChain is done by vector similarity. However, a number of vectorstores implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, Qdrant...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n",
"The standard search in LangChain is done by vector similarity. However, a number of vectorstores implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, ...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n",
"\n",
"**Step 1: Make sure the vectorstore you are using supports hybrid search**\n",
"\n",
"At the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have their own way to do it. This is generally exposed as a keyword argument that is passed in during `similarity_search`.\n",
"\n",
"By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search, and, if so, how to use it.\n",
"At the moment, there is no unified way to perform hybrid search in LangChain. Each vectorstore may have their own way to do it. This is generally exposed as a keyword argument that is passed in during `similarity_search`. By reading the documentation or source code, figure out whether the vectorstore you are using supports hybrid search, and, if so, how to use it.\n",
"\n",
"**Step 2: Add that parameter as a configurable field for the chain**\n",
"\n",

View File

@@ -44,7 +44,6 @@ This highlights functionality that is core to using LangChain.
- [How to: inspect runnables](/docs/how_to/inspect)
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
- [How to: migrate chains to LCEL](/docs/how_to/migrate_chains)
- [How to: pass runtime secrets to a runnable](/docs/how_to/runnable_runtime_secrets)
## Components
@@ -85,8 +84,8 @@ These are the core building blocks you can use when building applications.
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
- [How to: stream tool calls](/docs/how_to/tool_streaming)
- [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
- [How to: force a specific tool call](/docs/how_to/tool_choice)
- [How to: bind model-specific formated tools](/docs/how_to/tools_model_specific)
- [How to: force specific tool call](/docs/how_to/tool_choice)
- [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
### Messages
@@ -186,21 +185,15 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
LangChain [Tools](/docs/concepts/#tools) contain a description of the tool (to pass to the language model) as well as the implementation of the function to call. Refer [here](/docs/integrations/tools/) for a list of pre-built tools.
- [How to: create tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and toolkits](/docs/how_to/tools_builtin)
- [How to: use chat models to call tools](/docs/how_to/tool_calling)
- [How to: pass tool outputs to chat models](/docs/how_to/tool_results_pass_to_model)
- [How to: create custom tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
- [How to: use chat model to call tools](/docs/how_to/tool_calling)
- [How to: pass tool results back to model](/docs/how_to/tool_results_pass_to_model)
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
- [How to: pass run time values to tools](/docs/how_to/tool_runtime)
- [How to: add a human-in-the-loop for tools](/docs/how_to/tools_human)
- [How to: handle tool errors](/docs/how_to/tools_error)
- [How to: force models to call a tool](/docs/how_to/tool_choice)
- [How to: disable parallel tool calling](/docs/how_to/tool_calling_parallel)
- [How to: access the `RunnableConfig` from a tool](/docs/how_to/tool_configure)
- [How to: stream events from a tool](/docs/how_to/tool_stream_events)
- [How to: return artifacts from a tool](/docs/how_to/tool_artifacts/)
- [How to: convert Runnables to tools](/docs/how_to/convert_runnable_to_tool)
- [How to: add ad-hoc tool calling capability to models](/docs/how_to/tools_prompting)
- [How to: pass in runtime secrets](/docs/how_to/runnable_runtime_secrets)
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
- [How to: disable parallel tool calling](/docs/how_to/tool_choice)
### Multimodal
@@ -228,7 +221,6 @@ For in depth how-to guides for agents, please check out [LangGraph](https://lang
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
### Custom
@@ -241,7 +233,6 @@ All of LangChain components can easily be extended to support your own versions.
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: define a custom tool](/docs/how_to/custom_tools)
- [How to: dispatch custom callback events](/docs/how_to/callbacks_custom_events)
### Serialization
- [How to: save and load LangChain objects](/docs/how_to/serialization)

View File

@@ -60,7 +60,7 @@
" * document addition by id (`add_documents` method with `ids` argument)\n",
" * delete by id (`delete` method with `ids` argument)\n",
"\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MongoDBAtlasVectorSearch`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
"Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `AzureCosmosDBNoSqlVectorSearch`, `AzureCosmosDBVectorSearch`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SingleStoreDB`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n",
" \n",
"## Caution\n",
"\n",

View File

@@ -63,38 +63,6 @@
"Notice that if the contents of one of the messages to merge is a list of content blocks then the merged message will have a list of content blocks. And if both messages to merge have string contents then those are concatenated with a newline character."
]
},
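A minimal sketch of that merging behavior (the messages reuse the example content from the cell below and are otherwise assumptions):

```python
# Minimal sketch of the merging behavior described above; the messages reuse the
# example content from the cell below and are otherwise assumptions.
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    merge_message_runs,
)

messages = [
    SystemMessage("you're a good assistant."),
    SystemMessage("you always respond with a joke."),
    HumanMessage([{"type": "text", "text": "i wonder why it's called langchain"}]),
    HumanMessage("and who is harrison chasing anyways"),
    AIMessage(
        'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!'
    ),
    AIMessage("Why, he's probably chasing after the last cup of coffee in the office!"),
]

# The two SystemMessages (string contents) are joined with a newline; the two
# HumanMessages merge into a single list of content blocks.
merged = merge_message_runs(messages)
print("\n\n".join(repr(m) for m in merged))
```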
{
"cell_type": "markdown",
"id": "11f7e8d3",
"metadata": {},
"source": [
"The `merge_message_runs` utility also works with messages composed together using the overloaded `+` operation:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b51855c5",
"metadata": {},
"outputs": [],
"source": [
"messages = (\n",
" SystemMessage(\"you're a good assistant.\")\n",
" + SystemMessage(\"you always respond with a joke.\")\n",
" + HumanMessage([{\"type\": \"text\", \"text\": \"i wonder why it's called langchain\"}])\n",
" + HumanMessage(\"and who is harrison chasing anyways\")\n",
" + AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" )\n",
" + AIMessage(\n",
" \"Why, he's probably chasing after the last cup of coffee in the office!\"\n",
" )\n",
")\n",
"\n",
"merged = merge_message_runs(messages)\n",
"print(\"\\n\\n\".join([repr(x) for x in merged]))"
]
},
{
"cell_type": "markdown",
"id": "1b2eee74-71c8-4168-b968-bca580c25d18",

View File

@@ -1,78 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "6fcd2994-0092-4fa3-9bb1-c9c84babadc5",
"metadata": {},
"source": [
"# How to pass runtime secrets to runnables\n",
"\n",
":::info Requires `langchain-core >= 0.2.22`\n",
"\n",
":::\n",
"\n",
"We can pass in secrets to our runnables at runtime using the `RunnableConfig`. Specifically we can pass in secrets with a `__` prefix to the `configurable` field. This will ensure that these secrets aren't traced as part of the invocation:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "92e42e91-c277-49de-aa7a-dfb5c993c817",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"7"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.runnables import RunnableConfig\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def foo(x: int, config: RunnableConfig) -> int:\n",
" \"\"\"Sum x and a secret int\"\"\"\n",
" return x + config[\"configurable\"][\"__top_secret_int\"]\n",
"\n",
"\n",
"foo.invoke({\"x\": 5}, {\"configurable\": {\"__top_secret_int\": 2, \"traced_key\": \"bar\"}})"
]
},
{
"cell_type": "markdown",
"id": "ae3a4fb9-2ce7-46b2-b654-35dff0ae7197",
"metadata": {},
"source": [
"Looking at the LangSmith trace for this run, we can see that \"traced_key\" was recorded (as part of Metadata) while our secret int was not: https://smith.langchain.com/public/aa7e3289-49ca-422d-a408-f6b927210170/r"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"language": "python",
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -452,7 +452,7 @@
"source": [
"#### Generator Functions\n",
"\n",
"Let's fix the streaming using a generator function that can operate on the **input stream**.\n",
"Le'ts fix the streaming using a generator function that can operate on the **input stream**.\n",
"\n",
":::{.callout-tip}\n",
"A generator function (a function that uses `yield`) allows writing code that operates on **input streams**\n",

View File

@@ -1,396 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "503e36ae-ca62-4f8a-880c-4fe78ff5df93",
"metadata": {},
"source": [
"# How to return artifacts from a tool\n",
"\n",
":::info Prerequisites\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [ToolMessage](/docs/concepts/#toolmessage)\n",
"- [Tools](/docs/concepts/#tools)\n",
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
"\n",
":::\n",
"\n",
"Tools are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns a custom object, a dataframe or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n",
"\n",
"The Tool and [ToolMessage](https://api.python.langchain.com/en/latest/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n",
"\n",
":::info Requires ``langchain-core >= 0.2.19``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
"\n",
":::\n",
"\n",
"## Defining the tool\n",
"\n",
"If we want our tool to distinguish between message content and other artifacts, we need to specify `response_format=\"content_and_artifact\"` when defining our tool and make sure that we return a tuple of (content, artifact):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "762b9199-885f-4946-9c98-cc54d72b0d76",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU \"langchain-core>=0.2.19\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b9eb179d-1f41-4748-9866-b3d3e8c73cd0",
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"from typing import List, Tuple\n",
"\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool(response_format=\"content_and_artifact\")\n",
"def generate_random_ints(min: int, max: int, size: int) -> Tuple[str, List[int]]:\n",
" \"\"\"Generate size random ints in the range [min, max].\"\"\"\n",
" array = [random.randint(min, max) for _ in range(size)]\n",
" content = f\"Successfully generated array of {size} random ints in [{min}, {max}].\"\n",
" return content, array"
]
},
{
"cell_type": "markdown",
"id": "0ab05d25-af4a-4e5a-afe2-f090416d7ee7",
"metadata": {},
"source": [
"## Invoking the tool with ToolCall\n",
"\n",
"If we directly invoke our tool with just the tool arguments, you'll notice that we only get back the content part of the Tool output:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "5e7d5e77-3102-4a59-8ade-e4e699dd1817",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Successfully generated array of 10 random ints in [0, 9].'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Failed to batch ingest runs: LangSmithRateLimitError('Rate limit exceeded for https://api.smith.langchain.com/runs/batch. HTTPError(\\'429 Client Error: Too Many Requests for url: https://api.smith.langchain.com/runs/batch\\', \\'{\"detail\":\"Monthly unique traces usage limit exceeded\"}\\')')\n"
]
}
],
"source": [
"generate_random_ints.invoke({\"min\": 0, \"max\": 9, \"size\": 10})"
]
},
{
"cell_type": "markdown",
"id": "30db7228-f04c-489e-afda-9a572eaa90a1",
"metadata": {},
"source": [
"In order to get back both the content and the artifact, we need to invoke our model with a ToolCall (which is just a dictionary with \"name\", \"args\", \"id\" and \"type\" keys), which has additional info needed to generate a ToolMessage like the tool call ID:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "da1d939d-a900-4b01-92aa-d19011a6b034",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Successfully generated array of 10 random ints in [0, 9].', name='generate_random_ints', tool_call_id='123', artifact=[2, 8, 0, 6, 0, 0, 1, 5, 0, 0])"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_random_ints.invoke(\n",
" {\n",
" \"name\": \"generate_random_ints\",\n",
" \"args\": {\"min\": 0, \"max\": 9, \"size\": 10},\n",
" \"id\": \"123\", # required\n",
" \"type\": \"tool_call\", # required\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "a3cfc03d-020b-42c7-b0f8-c824af19e45e",
"metadata": {},
"source": [
"## Using with a model\n",
"\n",
"With a [tool-calling model](/docs/how_to/tool_calling/), we can easily use a model to call our Tool and generate ToolMessages:\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs\n",
" customVarName=\"llm\"\n",
"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "74de0286-b003-4b48-9cdd-ecab435515ca",
"metadata": {},
"outputs": [],
"source": [
"# | echo: false\n",
"# | output: false\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "8a67424b-d19c-43df-ac7b-690bca42146c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'generate_random_ints',\n",
" 'args': {'min': 1, 'max': 24, 'size': 6},\n",
" 'id': 'toolu_01EtALY3Wz1DVYhv1TLvZGvE',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_tools = llm.bind_tools([generate_random_ints])\n",
"\n",
"ai_msg = llm_with_tools.invoke(\"generate 6 positive ints less than 25\")\n",
"ai_msg.tool_calls"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "00c4e906-3ca8-41e8-a0be-65cb0db7d574",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Successfully generated array of 6 random ints in [1, 24].', name='generate_random_ints', tool_call_id='toolu_01EtALY3Wz1DVYhv1TLvZGvE', artifact=[2, 20, 23, 8, 1, 15])"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_random_ints.invoke(ai_msg.tool_calls[0])"
]
},
{
"cell_type": "markdown",
"id": "ddef2690-70de-4542-ab20-2337f77f3e46",
"metadata": {},
"source": [
"If we just pass in the tool call args, we'll only get back the content:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "f4a6c9a6-0ffc-4b0e-a59f-f3c3d69d824d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Successfully generated array of 6 random ints in [1, 24].'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"generate_random_ints.invoke(ai_msg.tool_calls[0][\"args\"])"
]
},
{
"cell_type": "markdown",
"id": "98d6443b-ff41-4d91-8523-b6274fc74ee5",
"metadata": {},
"source": [
"If we wanted to declaratively create a chain, we could do this:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "eb55ec23-95a4-464e-b886-d9679bf3aaa2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ToolMessage(content='Successfully generated array of 1 random ints in [1, 5].', name='generate_random_ints', tool_call_id='toolu_01FwYhnkwDPJPbKdGq4ng6uD', artifact=[5])]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from operator import attrgetter\n",
"\n",
"chain = llm_with_tools | attrgetter(\"tool_calls\") | generate_random_ints.map()\n",
"\n",
"chain.invoke(\"give me a random number between 1 and 5\")"
]
},
{
"cell_type": "markdown",
"id": "4df46be2-babb-4bfe-a641-91cd3d03ffaf",
"metadata": {},
"source": [
"## Creating from BaseTool class\n",
"\n",
"If you want to create a BaseTool object directly, instead of decorating a function with `@tool`, you can do so like this:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "9a9129e1-6aee-4a10-ad57-62ef3bf0276c",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class GenerateRandomFloats(BaseTool):\n",
" name: str = \"generate_random_floats\"\n",
" description: str = \"Generate size random floats in the range [min, max].\"\n",
" response_format: str = \"content_and_artifact\"\n",
"\n",
" ndigits: int = 2\n",
"\n",
" def _run(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
" range_ = max - min\n",
" array = [\n",
" round(min + (range_ * random.random()), ndigits=self.ndigits)\n",
" for _ in range(size)\n",
" ]\n",
" content = f\"Generated {size} floats in [{min}, {max}], rounded to {self.ndigits} decimals.\"\n",
" return content, array\n",
"\n",
" # Optionally define an equivalent async method\n",
"\n",
" # async def _arun(self, min: float, max: float, size: int) -> Tuple[str, List[float]]:\n",
" # ..."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "d7322619-f420-4b29-8ee5-023e693d0179",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rand_gen = GenerateRandomFloats(ndigits=4)\n",
"rand_gen.invoke({\"min\": 0.1, \"max\": 3.3333, \"size\": 3})"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "0892f277-23a6-4bb8-a0e9-59f533ac9750",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ToolMessage(content='Generated 3 floats in [0.1, 3.3333], rounded to 4 decimals.', name='generate_random_floats', tool_call_id='123', artifact=[1.5789, 2.464, 2.2719])"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rand_gen.invoke(\n",
" {\n",
" \"name\": \"generate_random_floats\",\n",
" \"args\": {\"min\": 0.1, \"max\": 3.3333, \"size\": 3},\n",
" \"id\": \"123\",\n",
" \"type\": \"tool_call\",\n",
" }\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"language": "python",
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -17,7 +17,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use chat models to call tools\n",
"# How to use a model to call tools\n",
"\n",
":::info Prerequisites\n",
"\n",
@@ -82,24 +82,30 @@
"## Passing tools to chat models\n",
"\n",
"Chat models that support tool calling features implement a `.bind_tools` method, which \n",
"receives a list of functions, Pydantic models, or LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
"receives a list of LangChain [tool objects](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html#langchain_core.tools.BaseTool) \n",
"and binds them to the chat model in its expected format. Subsequent invocations of the \n",
"chat model will include tool schemas in its calls to the LLM.\n",
"\n",
"For example, below we implement simple tools for arithmetic:"
"For example, we can define the schema for custom tools using the `@tool` decorator \n",
"on Python functions:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def add(a: int, b: int) -> int:\n",
" \"\"\"Adds a and b.\"\"\"\n",
" return a + b\n",
"\n",
"\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiplies a and b.\"\"\"\n",
" return a * b\n",
@@ -112,14 +118,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"LangChain also implements a `@tool` decorator that allows for further control of the tool schema, such as tool names and argument descriptions. See the how-to guide [here](/docs/how_to/custom_tools/#creating-tools-from-functions) for detail.\n",
"\n",
"We can also define the schema using [Pydantic](https://docs.pydantic.dev):"
"Or below, we define the schema using [Pydantic](https://docs.pydantic.dev):"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -339,7 +343,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.10.5"
}
},
"nbformat": 4,

View File

@@ -4,13 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to disable parallel tool calling\n",
"\n",
":::info OpenAI-specific\n",
"\n",
"This API is currently only supported by OpenAI.\n",
"\n",
":::\n",
"### Disabling parallel tool calling (OpenAI only)\n",
"\n",
"OpenAI tool calling performs tool calling in parallel by default. That means that if we ask a question like \"What is the weather in Tokyo, New York, and Chicago?\" and we have a tool for getting the weather, it will call the tool 3 times in parallel. We can force it to call only a single tool once by using the ``parallel_tool_call`` parameter."
]
@@ -105,24 +99,10 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat_minor": 2
}

View File

@@ -4,15 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to force models to call a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
":::\n",
"# How to force tool calling behavior\n",
"\n",
"In order to force our LLM to spelect a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:"
]
@@ -125,24 +117,10 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat_minor": 2
}

View File

@@ -1,132 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to access the RunnableConfig from a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Custom tools](/docs/how_to/custom_tools)\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language-lcel)\n",
"- [Configuring runnable behavior](/docs/how_to/configure/)\n",
"\n",
":::\n",
"\n",
"If you have a tool that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
"\n",
"Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.config.RunnableConfig.html) object. This guide show you some examples of how to do that.\n",
"\n",
":::caution Compatibility\n",
"\n",
"This guide requires `langchain-core>=0.2.16`.\n",
"\n",
":::\n",
"\n",
"## Inferring by parameter type\n",
"\n",
"To access reference the active config object from your custom tool, you'll need to add a parameter to your tool's signature typed as `RunnableConfig`. When you invoke your tool, LangChain will inspect your tool's signature, look for a parameter typed as `RunnableConfig`, and if it exists, populate that parameter with the correct value.\n",
"\n",
"**Note:** The actual name of the parameter doesn't matter, only the typing.\n",
"\n",
"To illustrate this, define a custom tool that takes a two parameters - one typed as a string, the other typed as `RunnableConfig`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain_core"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import RunnableConfig\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"async def reverse_tool(text: str, special_config_param: RunnableConfig) -> str:\n",
" \"\"\"A test tool that combines input text with a configurable parameter.\"\"\"\n",
" return (text + special_config_param[\"configurable\"][\"additional_field\"])[::-1]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then, if we invoke the tool with a `config` containing a `configurable` field, we can see that `additional_field` is passed through correctly:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'321cba'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await reverse_tool.ainvoke(\n",
" {\"text\": \"abc\"}, config={\"configurable\": {\"additional_field\": \"123\"}}\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now seen how to configure and stream events from within a tool. Next, check out the following guides for more on using tools:\n",
"\n",
"- [Stream events from child runs within a custom tool](/docs/how_to/tool_stream_events/)\n",
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -4,22 +4,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass tool outputs to chat models\n",
"# How to pass tool outputs to the model\n",
"\n",
":::info Prerequisites\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Tools](/docs/concepts/#tools)\n",
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
"\n",
":::\n",
"\n",
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s and `ToolCall`s. First, let's define our tools and our model."
"If we're using the model-generated tool invocations to actually call tools and want to pass the tool results back to the model, we can do so using `ToolMessage`s. First, let's define our tools and our model."
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -43,7 +35,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -62,32 +54,25 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"The nice thing about Tools is that if we invoke them with a ToolCall, we'll automatically get back a ToolMessage that can be fed back to the model: \n",
"\n",
":::info Requires ``langchain-core >= 0.2.19``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.19``. Please make sure your package is up to date.\n",
"\n",
":::"
"Now we can use ``ToolMessage`` to pass back the output of the tool calls to the model."
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[HumanMessage(content='What is 3 * 12? Also, what is 11 + 49?'),\n",
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Smg3NHJNxrKfAmd4f9GkaYn3', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'multiply'}, 'type': 'function'}, {'id': 'call_55K1C0DmH6U5qh810gW34xZ0', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 49, 'prompt_tokens': 88, 'total_tokens': 137}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-56657feb-96dd-456c-ab8e-1857eab2ade0-0', tool_calls=[{'name': 'multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_Smg3NHJNxrKfAmd4f9GkaYn3', 'type': 'tool_call'}, {'name': 'add', 'args': {'a': 11, 'b': 49}, 'id': 'call_55K1C0DmH6U5qh810gW34xZ0', 'type': 'tool_call'}], usage_metadata={'input_tokens': 88, 'output_tokens': 49, 'total_tokens': 137}),\n",
" ToolMessage(content='36', name='multiply', tool_call_id='call_Smg3NHJNxrKfAmd4f9GkaYn3'),\n",
" ToolMessage(content='60', name='add', tool_call_id='call_55K1C0DmH6U5qh810gW34xZ0')]"
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_svc2GLSxNFALbaCAbSjMI9J8', 'function': {'arguments': '{\"a\": 3, \"b\": 12}', 'name': 'Multiply'}, 'type': 'function'}, {'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh', 'function': {'arguments': '{\"a\": 11, \"b\": 49}', 'name': 'Add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 50, 'prompt_tokens': 105, 'total_tokens': 155}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a79ad1dd-95f1-4a46-b688-4c83f327a7b3-0', tool_calls=[{'name': 'Multiply', 'args': {'a': 3, 'b': 12}, 'id': 'call_svc2GLSxNFALbaCAbSjMI9J8'}, {'name': 'Add', 'args': {'a': 11, 'b': 49}, 'id': 'call_r8jxte3zW6h3MEGV3zH2qzFh'}]),\n",
" ToolMessage(content='36', tool_call_id='call_svc2GLSxNFALbaCAbSjMI9J8'),\n",
" ToolMessage(content='60', tool_call_id='call_r8jxte3zW6h3MEGV3zH2qzFh')]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
@@ -100,25 +85,24 @@
"messages.append(ai_msg)\n",
"for tool_call in ai_msg.tool_calls:\n",
" selected_tool = {\"add\": add, \"multiply\": multiply}[tool_call[\"name\"].lower()]\n",
" tool_msg = selected_tool.invoke(tool_call)\n",
" messages.append(tool_msg)\n",
" tool_output = selected_tool.invoke(tool_call[\"args\"])\n",
" messages.append(ToolMessage(tool_output, tool_call_id=tool_call[\"id\"]))\n",
"messages"
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 153, 'total_tokens': 171}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-ba5032f0-f773-406d-a408-8314e66511d0-0', usage_metadata={'input_tokens': 153, 'output_tokens': 18, 'total_tokens': 171})"
"AIMessage(content='3 * 12 is 36 and 11 + 49 is 60.', response_metadata={'token_usage': {'completion_tokens': 18, 'prompt_tokens': 171, 'total_tokens': 189}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_d9767fc5b9', 'finish_reason': 'stop', 'logprobs': None}, id='run-20b52149-e00d-48ea-97cf-f8de7a255f8c-0')"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
"output_type": "display_data"
}
],
"source": [
@@ -134,24 +118,10 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"language": "python",
"name": "poetry-venv-311"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 4
"nbformat_minor": 2
}

View File

@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass run time values to tools\n",
"# How to pass run time values to a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
@@ -12,28 +12,29 @@
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [How to create tools](/docs/how_to/custom_tools)\n",
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
"- [How to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling)\n",
":::\n",
"\n",
":::info Using with LangGraph\n",
":::{.callout-info} Supported models\n",
"\n",
"This how-to guide uses models with native tool calling capability.\n",
"You can find a [list of all models that support tool calling](/docs/integrations/chat/).\n",
"\n",
":::\n",
"\n",
":::{.callout-info} Using with LangGraph\n",
"\n",
"If you're using LangGraph, please refer to [this how-to guide](https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/)\n",
"which shows how to create an agent that keeps track of a given user's favorite pets.\n",
":::\n",
"\n",
":::caution Added in `langchain-core==0.2.21`\n",
"\n",
"Must have `langchain-core>=0.2.21` to use this functionality.\n",
"\n",
":::\n",
"\n",
"You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n",
"\n",
"Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n",
"\n",
"Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.\n",
"\n",
"This how-to guide shows you how to prevent the model from generating certain tool arguments and injecting them in directly at runtime."
"This how-to guide shows a simple design pattern that creates the tool dynamically at run time and binds to them appropriate values."
]
},
{
@@ -56,12 +57,23 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"# %pip install -qU langchain langchain_openai\n",
"%pip install -qU langchain langchain_openai\n",
"\n",
"import os\n",
"from getpass import getpass\n",
@@ -78,9 +90,10 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Hiding arguments from the model\n",
"# Passing request time information\n",
"\n",
"We can use the InjectedToolArg annotation to mark certain parameters of our Tool, like `user_id` as being injected at runtime, meaning they shouldn't be generated by the model"
"The idea is to create the tool dynamically at request time, and bind to it the appropriate information. For example,\n",
"this information may be the user ID as resolved from the request itself."
]
},
{
@@ -91,88 +104,46 @@
"source": [
"from typing import List\n",
"\n",
"from langchain_core.tools import InjectedToolArg, tool\n",
"from typing_extensions import Annotated\n",
"\n",
"user_to_pets = {}\n",
"\n",
"\n",
"@tool(parse_docstring=True)\n",
"def update_favorite_pets(\n",
" pets: List[str], user_id: Annotated[str, InjectedToolArg]\n",
") -> None:\n",
" \"\"\"Add the list of favorite pets.\n",
"\n",
" Args:\n",
" pets: List of favorite pets to set.\n",
" user_id: User's ID.\n",
" \"\"\"\n",
" user_to_pets[user_id] = pets\n",
"\n",
"\n",
"@tool(parse_docstring=True)\n",
"def delete_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
" \"\"\"Delete the list of favorite pets.\n",
"\n",
" Args:\n",
" user_id: User's ID.\n",
" \"\"\"\n",
" if user_id in user_to_pets:\n",
" del user_to_pets[user_id]\n",
"\n",
"\n",
"@tool(parse_docstring=True)\n",
"def list_favorite_pets(user_id: Annotated[str, InjectedToolArg]) -> None:\n",
" \"\"\"List favorite pets if any.\n",
"\n",
" Args:\n",
" user_id: User's ID.\n",
" \"\"\"\n",
" return user_to_pets.get(user_id, [])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If we look at the input schemas for these tools, we'll see that user_id is still listed:"
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_core.tools import BaseTool, tool"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_petsSchema',\n",
" 'description': 'Add the list of favorite pets.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"update_favorite_pets.get_input_schema().schema()"
"user_to_pets = {}\n",
"\n",
"\n",
"def generate_tools_for_user(user_id: str) -> List[BaseTool]:\n",
" \"\"\"Generate a set of tools that have a user id associated with them.\"\"\"\n",
"\n",
" @tool\n",
" def update_favorite_pets(pets: List[str]) -> None:\n",
" \"\"\"Add the list of favorite pets.\"\"\"\n",
" user_to_pets[user_id] = pets\n",
"\n",
" @tool\n",
" def delete_favorite_pets() -> None:\n",
" \"\"\"Delete the list of favorite pets.\"\"\"\n",
" if user_id in user_to_pets:\n",
" del user_to_pets[user_id]\n",
"\n",
" @tool\n",
" def list_favorite_pets() -> None:\n",
" \"\"\"List favorite pets if any.\"\"\"\n",
" return user_to_pets.get(user_id, [])\n",
"\n",
" return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"But if we look at the tool call schema, which is what is passed to the model for tool-calling, user_id has been removed:"
"Verify that the tools work correctly"
]
},
{
@@ -181,60 +152,46 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Add the list of favorite pets.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"{'eugene': ['cat', 'dog']}\n",
"['cat', 'dog']\n"
]
}
],
"source": [
"update_favorite_pets.tool_call_schema.schema()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"So when we invoke our tool, we need to pass in user_id:"
"update_pets, delete_pets, list_pets = generate_tools_for_user(\"eugene\")\n",
"update_pets.invoke({\"pets\": [\"cat\", \"dog\"]})\n",
"print(user_to_pets)\n",
"print(list_pets.invoke({}))"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'123': ['lizard', 'dog']}\n",
"['lizard', 'dog']\n"
]
}
],
"outputs": [],
"source": [
"user_id = \"123\"\n",
"update_favorite_pets.invoke({\"pets\": [\"lizard\", \"dog\"], \"user_id\": user_id})\n",
"print(user_to_pets)\n",
"print(list_favorite_pets.invoke({\"user_id\": user_id}))"
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"def handle_run_time_request(user_id: str, query: str):\n",
" \"\"\"Handle run time request.\"\"\"\n",
" tools = generate_tools_for_user(user_id)\n",
" llm_with_tools = llm.bind_tools(tools)\n",
" prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", \"You are a helpful assistant.\")],\n",
" )\n",
" chain = prompt | llm_with_tools\n",
" return llm_with_tools.invoke(query)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"But when the model calls the tool, no user_id argument will be generated:"
"This code will allow the LLM to invoke the tools, but the LLM is **unaware** of the fact that a **user ID** even exists!"
]
},
{
@@ -247,8 +204,7 @@
"text/plain": [
"[{'name': 'update_favorite_pets',\n",
" 'args': {'pets': ['cats', 'parrots']},\n",
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
" 'type': 'tool_call'}]"
" 'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]"
]
},
"execution_count": 6,
@@ -257,349 +213,30 @@
}
],
"source": [
"tools = [\n",
" update_favorite_pets,\n",
" delete_favorite_pets,\n",
" list_favorite_pets,\n",
"]\n",
"llm_with_tools = llm.bind_tools(tools)\n",
"ai_msg = llm_with_tools.invoke(\"my favorite animals are cats and parrots\")\n",
"ai_msg.tool_calls"
"ai_message = handle_run_time_request(\n",
" \"eugene\", \"my favorite animals are cats and parrots.\"\n",
")\n",
"ai_message.tool_calls"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Injecting arguments at runtime"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If we want to actually execute our tools using the model-generated tool call, we'll need to inject the user_id ourselves:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'update_favorite_pets',\n",
" 'args': {'pets': ['cats', 'parrots'], 'user_id': '123'},\n",
" 'id': 'call_W3cn4lZmJlyk8PCrKN4PRwqB',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from copy import deepcopy\n",
":::{.callout-important}\n",
"\n",
"from langchain_core.runnables import chain\n",
"Chat models only output requests to invoke tools, they don't actually invoke the underlying tools.\n",
"\n",
"\n",
"@chain\n",
"def inject_user_id(ai_msg):\n",
" tool_calls = []\n",
" for tool_call in ai_msg.tool_calls:\n",
" tool_call_copy = deepcopy(tool_call)\n",
" tool_call_copy[\"args\"][\"user_id\"] = user_id\n",
" tool_calls.append(tool_call_copy)\n",
" return tool_calls\n",
"\n",
"\n",
"inject_user_id.invoke(ai_msg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And now we can chain together our model, injection code, and the actual tools to create a tool-executing chain:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[ToolMessage(content='null', name='update_favorite_pets', tool_call_id='call_HUyF6AihqANzEYxQnTUKxkXj')]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",
"@chain\n",
"def tool_router(tool_call):\n",
" return tool_map[tool_call[\"name\"]]\n",
"\n",
"\n",
"chain = llm_with_tools | inject_user_id | tool_router.map()\n",
"chain.invoke(\"my favorite animals are cats and parrots\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Looking at the user_to_pets dict, we can see that it's been updated to include cats and parrots:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'123': ['cats', 'parrots']}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"user_to_pets"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Other ways of annotating args\n",
"\n",
"Here are a few other ways of annotating our tool args:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'UpdateFavoritePetsSchema',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class UpdateFavoritePetsSchema(BaseModel):\n",
" \"\"\"Update list of favorite pets\"\"\"\n",
"\n",
" pets: List[str] = Field(..., description=\"List of favorite pets to set.\")\n",
" user_id: Annotated[str, InjectedToolArg] = Field(..., description=\"User's ID.\")\n",
"\n",
"\n",
"@tool(args_schema=UpdateFavoritePetsSchema)\n",
"def update_favorite_pets(pets, user_id):\n",
" user_to_pets[user_id] = pets\n",
"\n",
"\n",
"update_favorite_pets.get_input_schema().schema()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"update_favorite_pets.tool_call_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'UpdateFavoritePetsSchema',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id',\n",
" 'description': \"User's ID.\",\n",
" 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Optional, Type\n",
"\n",
"\n",
"class UpdateFavoritePets(BaseTool):\n",
" name: str = \"update_favorite_pets\"\n",
" description: str = \"Update list of favorite pets\"\n",
" args_schema: Optional[Type[BaseModel]] = UpdateFavoritePetsSchema\n",
"\n",
" def _run(self, pets, user_id):\n",
" user_to_pets[user_id] = pets\n",
"\n",
"\n",
"UpdateFavoritePets().get_input_schema().schema()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'description': 'List of favorite pets to set.',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"UpdateFavoritePets().tool_call_schema.schema()"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_petsSchema',\n",
" 'description': 'Use the tool.\\n\\nAdd run_manager: Optional[CallbackManagerForToolRun] = None\\nto child implementations to enable tracing.',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}},\n",
" 'user_id': {'title': 'User Id', 'type': 'string'}},\n",
" 'required': ['pets', 'user_id']}"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"class UpdateFavoritePets2(BaseTool):\n",
" name: str = \"update_favorite_pets\"\n",
" description: str = \"Update list of favorite pets\"\n",
"\n",
" def _run(self, pets: List[str], user_id: Annotated[str, InjectedToolArg]) -> None:\n",
" user_to_pets[user_id] = pets\n",
"\n",
"\n",
"UpdateFavoritePets2().get_input_schema().schema()"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'update_favorite_pets',\n",
" 'description': 'Update list of favorite pets',\n",
" 'type': 'object',\n",
" 'properties': {'pets': {'title': 'Pets',\n",
" 'type': 'array',\n",
" 'items': {'type': 'string'}}},\n",
" 'required': ['pets']}"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"UpdateFavoritePets2().tool_call_schema.schema()"
"To see how to invoke the tools, please refer to [how to use a model to call tools](https://python.langchain.com/v0.2/docs/how_to/tool_calling).\n",
":::"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-311",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "poetry-venv-311"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -611,7 +248,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
}
},
"nbformat": 4,

View File

@@ -1,302 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to stream events from a tool\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Custom tools](/docs/how_to/custom_tools)\n",
"- [Using stream events](/docs/how_to/streaming/#using-stream-events)\n",
"- [Accessing RunnableConfig within a custom tool](/docs/how_to/tool_configure/)\n",
"\n",
":::\n",
"\n",
"If you have tools that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n",
"\n",
":::caution Compatibility\n",
"\n",
"LangChain cannot automatically propagate configuration, including callbacks necessary for `astream_events()`, to child runnables if you are running `async` code in `python<=3.10`. This is a common reason why you may fail to see events being emitted from custom runnables or tools.\n",
"\n",
"If you are running python<=3.10, you will need to manually propagate the `RunnableConfig` object to the child runnable in async environments. For an example of how to manually propagate the config, see the implementation of the `bar` RunnableLambda below.\n",
"\n",
"If you are running python>=3.11, the `RunnableConfig` will automatically propagate to child runnables in async environment. However, it is still a good idea to propagate the `RunnableConfig` manually if your code may run in older Python versions.\n",
"\n",
"This guide also requires `langchain-core>=0.2.16`.\n",
":::\n",
"\n",
"Say you have a custom tool that calls a chain that condenses its input by prompting a chat model to return only 10 words, then reversing the output. First, define it in a naive way:\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"model\" />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic langchain_core\n",
"\n",
"import os\n",
"from getpass import getpass\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"if \"ANTHROPIC_API_KEY\" not in os.environ:\n",
" os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
"\n",
"model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"async def special_summarization_tool(long_text: str) -> str:\n",
" \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
" prompt = ChatPromptTemplate.from_template(\n",
" \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
" )\n",
"\n",
" def reverse(x: str):\n",
" return x[::-1]\n",
"\n",
" chain = prompt | model | StrOutputParser() | reverse\n",
" summary = await chain.ainvoke({\"long_text\": long_text})\n",
" return summary"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Invoking the tool directly works just fine:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'.yad noitaudarg rof tiftuo sesoohc yrraB ;scisyhp seifed eeB'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"LONG_TEXT = \"\"\"\n",
"NARRATOR:\n",
"(Black screen with text; The sound of buzzing bees can be heard)\n",
"According to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\n",
"BARRY BENSON:\n",
"(Barry is picking out a shirt)\n",
"Yellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\n",
"JANET BENSON:\n",
"Barry! Breakfast is ready!\n",
"BARRY:\n",
"Coming! Hang on a second.\n",
"\"\"\"\n",
"\n",
"await special_summarization_tool.ainvoke({\"long_text\": LONG_TEXT})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"But if you wanted to access the raw output from the chat model rather than the full tool, you might try to use the [`astream_events()`](/docs/how_to/streaming/#using-stream-events) method and look for an `on_chat_model_end` event. Here's what happens:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"stream = special_summarization_tool.astream_events(\n",
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
"async for event in stream:\n",
" if event[\"event\"] == \"on_chat_model_end\":\n",
" # Never triggers in python<=3.10!\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You'll notice (unless you're running through this guide in `python>=3.11`) that there are no chat model events emitted from the child run!\n",
"\n",
"This is because the example above does not pass the tool's config object into the internal chain. To fix this, redefine your tool to take a special parameter typed as `RunnableConfig` (see [this guide](/docs/how_to/tool_configure) for more details). You'll also need to pass that parameter through into the internal chain when executing it:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.runnables import RunnableConfig\n",
"\n",
"\n",
"@tool\n",
"async def special_summarization_tool_with_config(\n",
" long_text: str, config: RunnableConfig\n",
") -> str:\n",
" \"\"\"A tool that summarizes input text using advanced techniques.\"\"\"\n",
" prompt = ChatPromptTemplate.from_template(\n",
" \"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n{long_text}\"\n",
" )\n",
"\n",
" def reverse(x: str):\n",
" return x[::-1]\n",
"\n",
" chain = prompt | model | StrOutputParser() | reverse\n",
" # Pass the \"config\" object as an argument to any executed runnables\n",
" summary = await chain.ainvoke({\"long_text\": long_text}, config=config)\n",
" return summary"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And now try the same `astream_events()` call as before with your new tool:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_end', 'data': {'output': AIMessage(content='Bee defies physics; Barry chooses outfit for graduation day.', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-d23abc80-0dce-4f74-9d7b-fb98ca4f2a9e', usage_metadata={'input_tokens': 182, 'output_tokens': 16, 'total_tokens': 198}), 'input': {'messages': [[HumanMessage(content=\"You are an expert writer. Summarize the following text in 10 words or less:\\n\\n\\nNARRATOR:\\n(Black screen with text; The sound of buzzing bees can be heard)\\nAccording to all known laws of aviation, there is no way a bee should be able to fly. Its wings are too small to get its fat little body off the ground. The bee, of course, flies anyway because bees don't care what humans think is impossible.\\nBARRY BENSON:\\n(Barry is picking out a shirt)\\nYellow, black. Yellow, black. Yellow, black. Yellow, black. Ooh, black and yellow! Let's shake it up a little.\\nJANET BENSON:\\nBarry! Breakfast is ready!\\nBARRY:\\nComing! Hang on a second.\\n\")]]}}, 'run_id': 'd23abc80-0dce-4f74-9d7b-fb98ca4f2a9e', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['f25c41fe-8972-4893-bc40-cecf3922c1fa']}\n"
]
}
],
"source": [
"stream = special_summarization_tool_with_config.astream_events(\n",
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
"async for event in stream:\n",
" if event[\"event\"] == \"on_chat_model_end\":\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Awesome! This time there's an event emitted.\n",
"\n",
"For streaming, `astream_events()` automatically calls internal runnables in a chain with streaming enabled if possible, so if you wanted to a stream of tokens as they are generated from the chat model, you could simply filter to look for `on_chat_model_stream` events with no other changes:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42', usage_metadata={'input_tokens': 182, 'output_tokens': 0, 'total_tokens': 182})}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='Bee', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' def', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='ies physics', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=';', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' Barry', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' cho', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='oses outfit', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' for', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' graduation', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' day', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='.', id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42')}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='', response_metadata={'stop_reason': 'end_turn', 'stop_sequence': None}, id='run-f24ab147-0b82-4e63-810a-b12bd8d1fb42', usage_metadata={'input_tokens': 0, 'output_tokens': 16, 'total_tokens': 16})}, 'run_id': 'f24ab147-0b82-4e63-810a-b12bd8d1fb42', 'name': 'ChatAnthropic', 'tags': ['seq:step:2'], 'metadata': {'ls_provider': 'anthropic', 'ls_model_name': 'claude-3-5-sonnet-20240620', 'ls_model_type': 'chat', 'ls_temperature': 0.0, 'ls_max_tokens': 1024}, 'parent_ids': ['385f3612-417c-4a70-aae0-cce3a5ba6fb6']}\n"
]
}
],
"source": [
"stream = special_summarization_tool_with_config.astream_events(\n",
" {\"long_text\": LONG_TEXT}, version=\"v2\"\n",
")\n",
"\n",
"async for event in stream:\n",
" if event[\"event\"] == \"on_chat_model_stream\":\n",
" print(event)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now seen how to stream events from within a tool. Next, check out the following guides for more on using tools:\n",
"\n",
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
"- Pass [tool results back to a model](/docs/how_to/tool_results_pass_to_model)\n",
"- [Dispatch custom callback events](/docs/how_to/callbacks_custom_events)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Building [tool-using chains and agents](/docs/how_to#tools)\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -228,7 +228,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.4"
}
},
"nbformat": 4,

View File

@@ -419,13 +419,13 @@
"Invoking: `exponentiate` with `{'base': 405, 'exponent': 2}`\n",
"\n",
"\n",
"\u001b[0m\u001b[38;5;200m\u001b[1;3m13286025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of taking 3 to the fifth power is 243. \n",
"\u001b[0m\u001b[38;5;200m\u001b[1;3m164025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of taking 3 to the fifth power is 243. \n",
"\n",
"The sum of twelve and three is 15. \n",
"\n",
"Multiplying 243 by 15 gives 3645. \n",
"\n",
"Finally, squaring 3645 gives 13286025.\u001b[0m\n",
"Finally, squaring 3645 gives 164025.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -434,7 +434,7 @@
"data": {
"text/plain": [
"{'input': 'Take 3 to the fifth power and multiply that by the sum of twelve and three, then square the whole result',\n",
" 'output': 'The result of taking 3 to the fifth power is 243. \\n\\nThe sum of twelve and three is 15. \\n\\nMultiplying 243 by 15 gives 3645. \\n\\nFinally, squaring 3645 gives 13286025.'}"
" 'output': 'The result of taking 3 to the fifth power is 243. \\n\\nThe sum of twelve and three is 15. \\n\\nMultiplying 243 by 15 gives 3645. \\n\\nFinally, squaring 3645 gives 164025.'}"
]
},
"execution_count": 18,

View File

@@ -7,18 +7,9 @@
"source": [
"# How to handle tool errors\n",
"\n",
":::info Prerequisites\n",
"Using a model to invoke a tool has some obvious potential failure modes. Firstly, the model needs to return a output that can be parsed at all. Secondly, the model needs to return tool arguments that are valid.\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [How to use a model to call tools](/docs/how_to/tool_calling)\n",
"\n",
":::\n",
"\n",
"Calling tools with an LLM is generally more reliable than pure prompting, but it isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n",
"\n",
"This guide covers some ways to build error handling into your chains to mitigate these failure modes."
"We can build error handling into our chains to mitigate these failure modes."
]
},
{
@@ -51,7 +42,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"id": "08785b6d-722d-4620-b6ec-36deb3842c69",
"metadata": {},
"outputs": [],
@@ -81,7 +72,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "86258950-5e61-4340-81b9-84a5d26e8773",
"metadata": {},
"outputs": [],
@@ -91,14 +82,12 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "1d20604e-c4d1-4d21-841b-23e4f61aec36",
"metadata": {},
"outputs": [],
@@ -110,13 +99,28 @@
"@tool\n",
"def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:\n",
" \"\"\"Do something complex with a complex tool.\"\"\"\n",
" return int_arg * float_arg\n",
"\n",
"\n",
" return int_arg * float_arg"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "553c2c13-28c8-4451-8a3a-6c31d52dc31d",
"metadata": {},
"outputs": [],
"source": [
"llm_with_tools = llm.bind_tools(\n",
" [complex_tool],\n",
")\n",
"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "802b2eca-9f79-4d6c-8257-85139ca5c752",
"metadata": {},
"outputs": [],
"source": [
"# Define chain\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool"
]
@@ -131,7 +135,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 12,
"id": "d354664c-ac44-4967-a35f-8912b3ad9477",
"metadata": {},
"outputs": [
@@ -142,14 +146,14 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/runnables/base.py:2572\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 2570\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m step\u001b[38;5;241m.\u001b[39minvoke(\u001b[38;5;28minput\u001b[39m, config, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 2571\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2572\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2573\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2574\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:380\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 373\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 374\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 376\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 377\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 378\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 379\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 380\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 381\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 382\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 383\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 384\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 385\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 386\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 387\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 388\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 389\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:537\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 535\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 536\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 537\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 538\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 539\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:526\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, config, **kwargs)\u001b[0m\n\u001b[1;32m 524\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 525\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(_set_config_context, child_config)\n\u001b[0;32m--> 526\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 527\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 528\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 529\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(\n\u001b[1;32m 530\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m context\u001b[38;5;241m.\u001b[39mrun(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run, \u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 534\u001b[0m )\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/langchain_core/tools.py:424\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 422\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 423\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 424\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 425\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 426\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 427\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 428\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 429\u001b[0m }\n\u001b[1;32m 430\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:526\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.parse_obj\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32m~/.pyenv/versions/3.10.5/lib/python3.10/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
"Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:241\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 237\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 238\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 239\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 240\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 241\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 242\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 243\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 244\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 245\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 246\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:387\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 387\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 388\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 389\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:378\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 364\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_tool_start(\n\u001b[1;32m 365\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdescription\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdescription},\n\u001b[1;32m 366\u001b[0m tool_input \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tool_input, \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mstr\u001b[39m(tool_input),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 375\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 376\u001b[0m )\n\u001b[1;32m 377\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 378\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 379\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 380\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 381\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 382\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 383\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 384\u001b[0m )\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:283\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 283\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 285\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 286\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 288\u001b[0m }\n\u001b[1;32m 289\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:526\u001b[0m, in \u001b[0;36mBaseModel.parse_obj\u001b[0;34m(cls, obj)\u001b[0m\n\u001b[1;32m 524\u001b[0m exc \u001b[38;5;241m=\u001b[39m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m expected dict not \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mobj\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 525\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ValidationError([ErrorWrapper(exc, loc\u001b[38;5;241m=\u001b[39mROOT_KEY)], \u001b[38;5;28mcls\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m--> 526\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mobj\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:341\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(__pydantic_self__, **data)\u001b[0m\n\u001b[1;32m 339\u001b[0m values, fields_set, validation_error \u001b[38;5;241m=\u001b[39m validate_model(__pydantic_self__\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m, data)\n\u001b[1;32m 340\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m validation_error:\n\u001b[0;32m--> 341\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m validation_error\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 343\u001b[0m object_setattr(__pydantic_self__, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__dict__\u001b[39m\u001b[38;5;124m'\u001b[39m, values)\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)"
]
}
@@ -172,26 +176,10 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 6,
"id": "8fedb550-683d-45ae-8876-ae7acb332019",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
" field required (type=value_error.missing)\n"
]
}
],
"outputs": [],
"source": [
"from typing import Any\n",
"\n",
@@ -205,8 +193,32 @@
" return f\"Calling tool with arguments:\\n\\n{tool_args}\\n\\nraised the following error:\\n\\n{type(e)}: {e}\"\n",
"\n",
"\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool\n",
"\n",
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "71a2c98d-c0be-4c0a-bb3d-41ad4596526c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calling tool with arguments:\n",
"\n",
"{'int_arg': 5, 'float_arg': 2.1}\n",
"\n",
"raised the following error:\n",
"\n",
"<class 'pydantic.v1.error_wrappers.ValidationError'>: 1 validation error for complex_toolSchema\n",
"dict_arg\n",
" field required (type=value_error.missing)\n"
]
}
],
"source": [
"print(\n",
" chain.invoke(\n",
" \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
@@ -226,7 +238,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 17,
"id": "02cc4223-35fa-4240-976a-012299ca703c",
"metadata": {},
"outputs": [
@@ -236,22 +248,19 @@
"10.5"
]
},
"execution_count": 10,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"better_model = ChatOpenAI(model=\"gpt-4-1106-preview\", temperature=0).bind_tools(\n",
" [complex_tool], tool_choice=\"complex_tool\"\n",
")\n",
"\n",
"better_chain = better_model | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n",
"\n",
"chain_with_fallback = chain.with_fallbacks([better_chain])\n",
"\n",
"chain_with_fallback.invoke(\n",
" \"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg\"\n",
")"
@@ -262,7 +271,7 @@
"id": "412f8c4e-cc83-4d87-84a1-5ba2f8edb1e9",
"metadata": {},
"source": [
"Looking at the [LangSmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
"Looking at the [Langsmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds."
]
},
{
@@ -277,13 +286,17 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 13,
"id": "b5659956-9454-468a-9753-a3ff9052b8f5",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"from typing import Any\n",
"\n",
"from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"\n",
"class CustomToolException(Exception):\n",
@@ -323,7 +336,7 @@
"# affect the prompt at all, but gives us the option to insert an arbitrary list of Messages\n",
"# into the prompt if needed. We'll use this on retries to insert the error message.\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"human\", \"{input}\"), (\"placeholder\", \"{last_output}\")]\n",
" [(\"human\", \"{input}\"), MessagesPlaceholder(\"last_output\", optional=True)]\n",
")\n",
"chain = prompt | llm_with_tools | tool_custom_exception\n",
"\n",
@@ -335,7 +348,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 14,
"id": "4c45f5bd-cbb4-47d5-b4b6-aec50673c750",
"metadata": {},
"outputs": [
@@ -345,7 +358,7 @@
"10.5"
]
},
"execution_count": 12,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -365,24 +378,6 @@
"source": [
"And our chain succeeds! Looking at the [LangSmith trace](https://smith.langchain.com/public/c11e804c-e14f-4059-bd09-64766f999c14/r), we can see that indeed our initial chain still fails, and it's only on retrying that the chain succeeds."
]
},
{
"cell_type": "markdown",
"id": "6b97af9f",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Now you've seen some strategies how to handle tool calling errors. Next, you can learn more about how to use tools:\n",
"\n",
"- Few shot prompting [with tools](/docs/how_to/tools_few_shot/)\n",
"- Stream [tool calls](/docs/how_to/tool_streaming/)\n",
"- Pass [runtime values to tools](/docs/how_to/tool_runtime)\n",
"\n",
"You can also check out some more specific uses of tool calling:\n",
"\n",
"- Getting [structured outputs](/docs/how_to/structured_output/) from models"
]
}
],
"metadata": {
@@ -401,7 +396,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.10.1"
}
},
"nbformat": 4,

View File

@@ -540,7 +540,7 @@
"id": "137662a6"
},
"source": [
"## Example usage within RunnableWithMessageHistory "
"## Example usage within a Conversation Chains"
]
},
{
@@ -550,7 +550,7 @@
"id": "79efa62d"
},
"source": [
"Like any other integration, ChatNVIDIA is fine to support chat utilities like RunnableWithMessageHistory which is analogous to using `ConversationChain`. Below, we show the [LangChain RunnableWithMessageHistory](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
"Like any other integration, ChatNVIDIA is fine to support chat utilities like conversation buffers by default. Below, we show the [LangChain ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/types/buffer) example applied to the `mistralai/mixtral-8x22b-instruct-v0.1` model."
]
},
{
@@ -572,19 +572,8 @@
},
"outputs": [],
"source": [
"from langchain_core.chat_history import InMemoryChatMessageHistory\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"\n",
"# store is a dictionary that maps session IDs to their corresponding chat histories.\n",
"store = {} # memory is maintained outside the chain\n",
"\n",
"\n",
"# A function that returns the chat history for a given session ID.\n",
"def get_session_history(session_id: str) -> InMemoryChatMessageHistory:\n",
" if session_id not in store:\n",
" store[session_id] = InMemoryChatMessageHistory()\n",
" return store[session_id]\n",
"\n",
"from langchain.chains import ConversationChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"\n",
"chat = ChatNVIDIA(\n",
" model=\"mistralai/mixtral-8x22b-instruct-v0.1\",\n",
@@ -593,18 +582,24 @@
" top_p=1.0,\n",
")\n",
"\n",
"# Define a RunnableConfig object, with a `configurable` key. session_id determines thread\n",
"config = {\"configurable\": {\"session_id\": \"1\"}}\n",
"\n",
"conversation = RunnableWithMessageHistory(\n",
" chat,\n",
" get_session_history,\n",
")\n",
"\n",
"conversation.invoke(\n",
" \"Hi I'm Srijan Dubey.\", # input or query\n",
" config=config,\n",
")"
"conversation = ConversationChain(llm=chat, memory=ConversationBufferMemory())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f644ff28",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 268
},
"id": "f644ff28",
"outputId": "bae354cc-2118-4e01-ce20-a717ac94d27d"
},
"outputs": [],
"source": [
"conversation.invoke(\"Hi there!\")[\"response\"]"
]
},
{
@@ -621,30 +616,26 @@
},
"outputs": [],
"source": [
"conversation.invoke(\n",
" \"I'm doing well! Just having a conversation with an AI.\",\n",
" config=config,\n",
")"
"conversation.invoke(\"I'm doing well! Just having a conversation with an AI.\")[\n",
" \"response\"\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "uHIMZxVSVNBC",
"id": "LyD1xVKmVSs4",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 284
"height": 350
},
"id": "uHIMZxVSVNBC",
"outputId": "79acc89d-a820-4f2c-bac2-afe99da95580"
"id": "LyD1xVKmVSs4",
"outputId": "a1714513-a8fd-4d14-f974-233e39d5c4f5"
},
"outputs": [],
"source": [
"conversation.invoke(\n",
" \"Tell me about yourself.\",\n",
" config=config,\n",
")"
"conversation.invoke(\"Tell me about yourself.\")[\"response\"]"
]
}
],

View File

@@ -2,7 +2,6 @@
"cells": [
{
"cell_type": "raw",
"id": "afaf8039",
"metadata": {},
"source": [
"---\n",
@@ -12,7 +11,6 @@
},
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# ChatOllama\n",
@@ -25,18 +23,6 @@
"\n",
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/v0.2/docs/integrations/chat/ollama) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html) | [langchain-ollama](https://api.python.langchain.com/en/latest/ollama_api_reference.html) | ✅ | ❌ | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-ollama?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-ollama?style=flat-square&label=%20) |\n",
"\n",
"### Model features\n",
"| [Tool calling](/docs/how_to/tool_calling/) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ❌ | ❌ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | ❌ | \n",
"\n",
"## Setup\n",
"\n",
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
@@ -54,285 +40,307 @@
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
"* To view all pulled models, use `ollama list`\n",
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n"
]
},
{
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:"
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
"\n",
"## Usage\n",
"\n",
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
"\n",
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n",
"\n",
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n",
"\n",
"## Interacting with Models \n",
"\n",
"Here are a few ways to interact with pulled local models\n",
"\n",
"#### In the terminal:\n",
"\n",
"* All of your local models are automatically served on `localhost:11434`\n",
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
"\n",
"#### Via an API\n",
"\n",
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
"\n",
"```bash\n",
"curl http://localhost:11434/api/generate -d '{\n",
" \"model\": \"llama3\",\n",
" \"prompt\":\"Why is the sky blue?\"\n",
"}'\n",
"```\n",
"\n",
"See the Ollama [API documentation](https://github.com/jmorganca/ollama/blob/main/docs/api.md) for all endpoints.\n",
"\n",
"#### Via LangChain\n",
"\n",
"See a typical basic example of using Ollama via the `ChatOllama` chat model in your LangChain application. \n",
"\n",
"View the [API Reference for ChatOllama](https://api.python.langchain.com/en/latest/chat_models/langchain_community.chat_models.ollama.ChatOllama.html#langchain_community.chat_models.ollama.ChatOllama) for more."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
]
},
{
"cell_type": "markdown",
"id": "0730d6a1-c893-4840-9817-5e5251676d5d",
"metadata": {},
"source": [
"### Installation\n",
"\n",
"The LangChain Ollama integration lives in the `langchain-ollama` package:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "652d6238-1f87-422a-b135-f5abbb8652fc",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-ollama"
]
},
{
"cell_type": "markdown",
"id": "a38cde65-254d-4219-a441-068766c0d4b5",
"metadata": {},
"source": [
"## Instantiation\n",
"\n",
"Now we can instantiate our model object and generate chat completions:\n",
"\n",
"- TODO: Update model instantiation with relevant params."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
"source": [
"from langchain_ollama import ChatOllama\n",
"\n",
"llm = ChatOllama(\n",
" model=\"llama3\",\n",
" temperature=0,\n",
" # other params...\n",
")"
]
},
{
"cell_type": "markdown",
"id": "2b4f3e15",
"metadata": {},
"source": [
"## Invocation"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Je adore le programmation.\\n\\n(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)', response_metadata={'model': 'llama3', 'created_at': '2024-07-22T17:43:54.731273Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 11094839375, 'load_duration': 10121854667, 'prompt_eval_count': 36, 'prompt_eval_duration': 146569000, 'eval_count': 46, 'eval_duration': 816593000}, id='run-befccbdc-e1f9-42a9-85cf-e69b926d6b8b-0', usage_metadata={'input_tokens': 36, 'output_tokens': 46, 'total_tokens': 82})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.messages import AIMessage\n",
"\n",
"messages = [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n",
" ),\n",
" (\"human\", \"I love programming.\"),\n",
"]\n",
"ai_msg = llm.invoke(messages)\n",
"ai_msg"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Je adore le programmation.\n",
"Why did the astronaut break up with his girlfriend?\n",
"\n",
"(Note: \"programmation\" is not commonly used in French, but I translated it as \"le programmation\" to maintain the same grammatical structure and meaning as the original English sentence.)\n"
"Because he needed space!\n"
]
}
],
"source": [
"print(ai_msg.content)"
]
},
{
"cell_type": "markdown",
"id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
"metadata": {},
"source": [
"## Chaining\n",
"\n",
"We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmieren!\\n\\n(Note: \"Ich liebe\" means \"I love\", \"Programmieren\" is the verb for \"programming\")', response_metadata={'model': 'llama3', 'created_at': '2024-07-04T04:22:33.864132Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 1310800083, 'load_duration': 1782000, 'prompt_eval_count': 16, 'prompt_eval_duration': 250199000, 'eval_count': 29, 'eval_duration': 1057192000}, id='run-cbadbe59-2de2-4ec0-a18a-b3220226c3d2-0')"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# LangChain supports many other chat models. Here, we're using Ollama\n",
"from langchain_community.chat_models import ChatOllama\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"# supports many more optional parameters. Hover on your `ChatOllama(...)`\n",
"# class to view the latest available supported parameters\n",
"llm = ChatOllama(model=\"llama3\")\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
"# using LangChain Expressive Language chain syntax\n",
"# learn more about the LCEL on\n",
"# /docs/concepts/#langchain-expression-language-lcel\n",
"chain = prompt | llm | StrOutputParser()\n",
"\n",
"# for brevity, response is printed in terminal\n",
"# You can use LangServe to deploy your application for\n",
"# production\n",
"print(chain.invoke({\"topic\": \"Space travel\"}))"
]
},
{
"cell_type": "markdown",
"id": "0f51345d-0a9d-43f1-8fca-d0662cb8e21b",
"metadata": {},
"source": [
"## Tool calling\n",
"\n",
"We can use [tool calling](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/) with an LLM [that has been fine-tuned for tool use](https://ollama.com/library/llama3-groq-tool-use): \n",
"\n",
"```\n",
"ollama pull llama3-groq-tool-use\n",
"```\n",
"\n",
"We can just pass normal Python functions directly as tools."
"LCEL chains, out of the box, provide extra functionalities, such as streaming of responses, and async support"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "5250bceb-1029-41ff-b447-983518704d88",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'validate_user',\n",
" 'args': {'addresses': ['123 Fake St, Boston MA',\n",
" '234 Pretend Boulevard, Houston TX'],\n",
" 'user_id': 123},\n",
" 'id': 'fe2148d3-95fb-48e9-845a-4bfecc1f1f96',\n",
" 'type': 'tool_call'}]"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"Why\n",
" did\n",
" the\n",
" astronaut\n",
" break\n",
" up\n",
" with\n",
" his\n",
" girlfriend\n",
" before\n",
" going\n",
" to\n",
" Mars\n",
"?\n",
"\n",
"\n",
"Because\n",
" he\n",
" needed\n",
" space\n",
"!\n",
"\n"
]
}
],
"source": [
"from typing import List\n",
"topic = {\"topic\": \"Space travel\"}\n",
"\n",
"from langchain_ollama import ChatOllama\n",
"from typing_extensions import TypedDict\n",
"\n",
"\n",
"def validate_user(user_id: int, addresses: List) -> bool:\n",
" \"\"\"Validate user using historical addresses.\n",
"\n",
" Args:\n",
" user_id: (int) the user ID.\n",
" addresses: Previous addresses.\n",
" \"\"\"\n",
" return True\n",
"\n",
"\n",
"llm = ChatOllama(\n",
" model=\"llama3-groq-tool-use\",\n",
" temperature=0,\n",
").bind_tools([validate_user])\n",
"\n",
"result = llm.invoke(\n",
" \"Could you validate user 123? They previously lived at \"\n",
" \"123 Fake St in Boston MA and 234 Pretend Boulevard in \"\n",
" \"Houston TX.\"\n",
")\n",
"result.tool_calls"
"for chunks in chain.stream(topic):\n",
" print(chunks)"
]
},
{
"cell_type": "markdown",
"id": "2bb034ff-218f-4865-afea-3f5e57d3bdee",
"metadata": {},
"source": [
"We look at the LangSmith trace to see that the tool call was performed: \n",
"For streaming async support, here's an example - all possible via the single chain created above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"topic = {\"topic\": \"Space travel\"}\n",
"\n",
"https://smith.langchain.com/public/4169348a-d6be-45df-a7cf-032f6baa4697/r\n",
"\n",
"In particular, the trace shows how the tool schema was populated."
"async for chunks in chain.astream(topic):\n",
" print(chunks)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Take a look at the [LangChain Expressive Language (LCEL) Interface](/docs/concepts#interface) for the other available interfaces for use when a chain is created.\n",
"\n",
"## Building from source\n",
"\n",
"For up to date instructions on building from source, check the Ollama documentation on [Building from Source](https://github.com/ollama/ollama?tab=readme-ov-file#building)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Extraction\n",
" \n",
"Use the latest version of Ollama and supply the [`format`](https://github.com/jmorganca/ollama/blob/main/docs/api.md#json-mode) flag. The `format` flag will force the model to produce the response in JSON.\n",
"\n",
"> **Note:** You can also try out the experimental [OllamaFunctions](/docs/integrations/chat/ollama_functions) wrapper for convenience."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOllama\n",
"\n",
"llm = ChatOllama(model=\"llama3\", format=\"json\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content='{ \"morning\": \"blue\", \"noon\": \"clear blue\", \"afternoon\": \"hazy yellow\", \"evening\": \"orange-red\" }\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n ' id='run-e893700f-e2d0-4df8-ad86-17525dcee318-0'\n"
]
}
],
"source": [
"from langchain_core.messages import HumanMessage\n",
"\n",
"messages = [\n",
" HumanMessage(\n",
" content=\"What color is the sky at different times of the day? Respond using JSON\"\n",
" )\n",
"]\n",
"\n",
"chat_model_response = llm.invoke(messages)\n",
"print(chat_model_response)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Name: John\n",
"Age: 35\n",
"Likes: Pizza\n"
]
}
],
"source": [
"import json\n",
"\n",
"from langchain_community.chat_models import ChatOllama\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"json_schema = {\n",
" \"title\": \"Person\",\n",
" \"description\": \"Identifying information about a person.\",\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"name\": {\"title\": \"Name\", \"description\": \"The person's name\", \"type\": \"string\"},\n",
" \"age\": {\"title\": \"Age\", \"description\": \"The person's age\", \"type\": \"integer\"},\n",
" \"fav_food\": {\n",
" \"title\": \"Fav Food\",\n",
" \"description\": \"The person's favorite food\",\n",
" \"type\": \"string\",\n",
" },\n",
" },\n",
" \"required\": [\"name\", \"age\"],\n",
"}\n",
"\n",
"llm = ChatOllama(model=\"llama2\")\n",
"\n",
"messages = [\n",
" HumanMessage(\n",
" content=\"Please tell me about a person using the following JSON schema:\"\n",
" ),\n",
" HumanMessage(content=\"{dumps}\"),\n",
" HumanMessage(\n",
" content=\"Now, considering the schema, tell me about a person named John who is 35 years old and loves pizza.\"\n",
" ),\n",
"]\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(messages)\n",
"dumps = json.dumps(json_schema, indent=2)\n",
"\n",
"chain = prompt | llm | StrOutputParser()\n",
"\n",
"print(chain.invoke({\"dumps\": dumps}))"
]
},
{
"cell_type": "markdown",
"id": "4c5e0197",
"metadata": {},
"source": [
"## Multi-modal\n",
"\n",
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
"\n",
" ollama pull bakllava\n",
"Browse the full set of versions for models with `tags`, such as [Llava](https://ollama.ai/library/llava/tags).\n",
"\n",
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
"Download the desired LLM via `ollama pull bakllava`\n",
"\n",
"Be sure to update Ollama so that you have the most recent version to support multi-modal.\n",
"\n",
"Check out the typical example of how to use ChatOllama multi-modal support below:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "36c9b1c2",
"execution_count": 18,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"!pip install --upgrade --quiet pillow"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
@@ -391,8 +399,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"id": "32b3ba7b",
"execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -404,8 +411,8 @@
}
],
"source": [
"from langchain_community.chat_models import ChatOllama\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_ollama import ChatOllama\n",
"\n",
"llm = ChatOllama(model=\"bakllava\", temperature=0)\n",
"\n",
@@ -442,12 +449,20 @@
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
"metadata": {},
"source": [
"## API reference\n",
"## Concurrency Features\n",
"\n",
"For detailed documentation of all ChatOllama features and configurations head to the API reference: https://api.python.langchain.com/en/latest/chat_models/langchain_ollama.chat_models.ChatOllama.html"
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
"\n",
"Start the Ollama server with:\n",
"\n",
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
"\n",
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
"\n",
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
]
}
],
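To make the concurrency settings above concrete, here is a minimal sketch of issuing several requests at once from LangChain. It assumes an Ollama server started with `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=2 ollama serve` and that the `llama3` and `bakllava` models have already been pulled; the prompts and model names are placeholders.

```python
import asyncio

from langchain_ollama import ChatOllama

# Two independent model handles; the server schedules them according to
# OLLAMA_NUM_PARALLEL and OLLAMA_MAX_LOADED_MODELS.
llama = ChatOllama(model="llama3", temperature=0)
llava = ChatOllama(model="bakllava", temperature=0)


async def main() -> None:
    # Fire several requests concurrently; with OLLAMA_NUM_PARALLEL > 1 the
    # server can process them in parallel instead of queueing them.
    replies = await asyncio.gather(
        llama.ainvoke("Give me a one-line fact about Mars."),
        llama.ainvoke("Give me a one-line fact about Venus."),
        llava.ainvoke("Describe a nebula in one sentence."),
    )
    for reply in replies:
        print(reply.content)


asyncio.run(main())
```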
@@ -471,5 +486,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 5
"nbformat_minor": 4
}

View File

@@ -6,7 +6,6 @@
"source": [
"---\n",
"sidebar_label: Ollama Functions\n",
"sidebar_class_name: hidden\n",
"---"
]
},
@@ -16,16 +15,16 @@
"source": [
"# OllamaFunctions\n",
"\n",
":::warning\n",
"\n",
"This was an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. The [primary Ollama integration](/docs/integrations/chat/ollama/) now supports tool calling, and should be used instead.\n",
"\n",
":::\n",
"This notebook shows how to use an experimental wrapper around Ollama that gives it [tool calling capabilities](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling).\n",
"\n",
"Note that more powerful and capable models will perform better with complex schema and/or multiple functions. The examples below use llama3 and phi3 models.\n",
"For a complete list of supported models and model variants, see the [Ollama model library](https://ollama.ai/library).\n",
"\n",
":::warning\n",
"\n",
"This is an experimental wrapper that attempts to bolt-on tool calling support to models that do not natively support it. Use with caution.\n",
"\n",
":::\n",
"## Overview\n",
"\n",
"### Integration details\n",

View File

@@ -1,19 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "bd931196",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "344fc5a3",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "a792e839",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "61c2629c",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "e329385c",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "3169f380",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "87552e5a",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "9a10cdcc",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,15 +1,5 @@
{
"cells": [
{
"cell_type": "raw",
"id": "41200199",
"metadata": {},
"source": [
"---\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1f3a5ebf",

View File

@@ -1,484 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "6b74f73d-1763-42d0-9c24-8f65f445bb72",
"metadata": {},
"source": [
"# Dedoc\n",
"\n",
"This sample demonstrates the use of `Dedoc` in combination with `LangChain` as a `DocumentLoader`.\n",
"\n",
"## Overview\n",
"\n",
"[Dedoc](https://dedoc.readthedocs.io) is an [open-source](https://github.com/ispras/dedoc)\n",
"library/service that extracts texts, tables, attached files and document structure\n",
"(e.g., titles, list items, etc.) from files of various formats.\n",
"\n",
"`Dedoc` supports `DOCX`, `XLSX`, `PPTX`, `EML`, `HTML`, `PDF`, images and more.\n",
"Full list of supported formats can be found [here](https://dedoc.readthedocs.io/en/latest/#id1).\n",
"\n",
"\n",
"### Integration details\n",
"\n",
"| Class | Package | Local | Serializable | JS support |\n",
"|:-----------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------|:-----:|:------------:|:----------:|\n",
"| [DedocFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ |\n",
"| [DedocPDFLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
"| [DedocAPIFileLoader](https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html) | [langchain_community](https://api.python.langchain.com/en/latest/community_api_reference.html) | ❌ | beta | ❌ | \n",
"\n",
"\n",
"### Loader features\n",
"\n",
"Methods for lazy loading and async loading are available, but in fact, document loading is executed synchronously.\n",
"\n",
"| Source | Document Lazy Loading | Async Support |\n",
"|:------------------:|:---------------------:|:-------------:| \n",
"| DedocFileLoader | ❌ | ❌ |\n",
"| DedocPDFLoader | ❌ | ❌ | \n",
"| DedocAPIFileLoader | ❌ | ❌ | \n",
"\n",
"## Setup\n",
"\n",
"* To access `DedocFileLoader` and `DedocPDFLoader` document loaders, you'll need to install the `dedoc` integration package.\n",
"* To access `DedocAPIFileLoader`, you'll need to run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
"for more details):\n",
"\n",
"```bash\n",
"docker pull dedocproject/dedoc\n",
"docker run -p 1231:1231\n",
"```\n",
"\n",
"`Dedoc` installation instruction is given [here](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html)."
]
},
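Once the container above is running, `DedocAPIFileLoader` (listed in the integration table) can delegate parsing to that service instead of a locally installed `dedoc`. The snippet below is only a sketch: it assumes the service is reachable at `http://localhost:1231` and that the loader accepts a `url` argument, per its API reference linked above.

```python
from langchain_community.document_loaders import DedocAPIFileLoader

# Send the file to the Dedoc service started via `docker run -p 1231:1231 ...`
loader = DedocAPIFileLoader(
    "./example_data/state_of_the_union.txt",
    url="http://localhost:1231",  # assumed service address; adjust if needed
)

docs = loader.load()
print(docs[0].page_content[:100])
```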
{
"cell_type": "code",
"execution_count": 1,
"id": "511c109d-a5c3-42ba-914e-5d1b385bc40f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# Install package\n",
"%pip install --quiet \"dedoc[torch]\""
]
},
{
"cell_type": "markdown",
"id": "6820c0e9-d56d-4899-b8c8-374760360e2b",
"metadata": {},
"source": [
"## Instantiation"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "c1f98cae-71ec-4d60-87fb-96c1a76851d8",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import DedocFileLoader\n",
"\n",
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")"
]
},
{
"cell_type": "markdown",
"id": "5d7bc2b3-73a0-4cd6-8014-cc7184aa9d4a",
"metadata": {},
"source": [
"## Load"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "b9097c14-6168-4726-819e-24abb9a63b13",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"docs = loader.load()\n",
"docs[0].page_content[:100]"
]
},
{
"cell_type": "markdown",
"id": "9ed8bd46-0047-4ccc-b2d6-beb7761f7312",
"metadata": {},
"source": [
"## Lazy Load"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "6ae12d7e-8105-4bbe-9031-0e968475f6bf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and t\n"
]
}
],
"source": [
"docs = loader.lazy_load()\n",
"\n",
"for doc in docs:\n",
" print(doc.page_content[:100])\n",
" break"
]
},
{
"cell_type": "markdown",
"id": "8772ae40-6239-4751-bb2d-b4a9415c1ad1",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For detailed information on configuring and calling `Dedoc` loaders, please see the API references: \n",
"\n",
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocFileLoader.html\n",
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.pdf.DedocPDFLoader.html\n",
"* https://api.python.langchain.com/en/latest/document_loaders/langchain_community.document_loaders.dedoc.DedocAPIFileLoader.html"
]
},
{
"cell_type": "markdown",
"id": "c4d5e702-0e21-4cad-a4c3-b9b3bff77203",
"metadata": {},
"source": [
"## Loading any file\n",
"\n",
"For automatic handling of any file in a [supported format](https://dedoc.readthedocs.io/en/latest/#id1),\n",
"`DedocFileLoader` can be useful.\n",
"The file loader automatically detects the file type with a correct extension.\n",
"\n",
"File parsing process can be configured through `dedoc_kwargs` during the `DedocFileLoader` class initialization.\n",
"Here the basic examples of some options usage are given, \n",
"please see the documentation of `DedocFileLoader` and \n",
"[dedoc documentation](https://dedoc.readthedocs.io/en/latest/parameters/parameters.html) \n",
"to get more details about configuration parameters."
]
},
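As a rough sketch of passing several `dedoc_kwargs` at once, the options introduced later on this page (`split`, `pages`, `with_tables`) can be combined at initialization; the full set of accepted keywords is listed in the dedoc parameters documentation linked above.

```python
from langchain_community.document_loaders import DedocFileLoader

# Combine several dedoc options: split by page, keep only the first two pages,
# and skip table extraction. Extra keyword arguments are forwarded to dedoc.
loader = DedocFileLoader(
    "./example_data/layout-parser-paper.pdf",
    split="page",
    pages=":2",
    with_tables=False,
)

docs = loader.load()
print(len(docs))
```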
{
"cell_type": "markdown",
"id": "de97d0ed-d6b1-44e0-b392-1f3d89c762f9",
"metadata": {},
"source": [
"### Basic example"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "50ffeeee-db12-4801-b208-7e32ea3d72ad",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import DedocFileLoader\n",
"\n",
"loader = DedocFileLoader(\"./example_data/state_of_the_union.txt\")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:400]"
]
},
{
"cell_type": "markdown",
"id": "457e5d4c-a4ee-4f31-ae74-3f75a1bbd0af",
"metadata": {},
"source": [
"### Modes of split\n",
"\n",
"`DedocFileLoader` supports different types of document splitting into parts (each part is returned separately).\n",
"For this purpose, `split` parameter is used with the following options:\n",
"* `document` (default value): document text is returned as a single langchain `Document` object (don't split);\n",
"* `page`: split document text into pages (works for `PDF`, `DJVU`, `PPTX`, `PPT`, `ODP`);\n",
"* `node`: split document text into `Dedoc` tree nodes (title nodes, list item nodes, raw text nodes);\n",
"* `line`: split document text into textual lines."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "eec54d31-ae7a-4a3c-aa10-4ae276b1e4c4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = DedocFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\",\n",
" split=\"page\",\n",
" pages=\":2\",\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"len(docs)"
]
},
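{
 "cell_type": "markdown",
 "id": "split-line-example-md",
 "metadata": {},
 "source": [
  "The following is a minimal sketch (not part of the original notebook) showing the `line` split mode listed above; it assumes the same `state_of_the_union.txt` example file used earlier."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "id": "split-line-example-code",
 "metadata": {},
 "outputs": [],
 "source": [
  "# Hedged example: split the document into textual lines (one Document per line)\n",
  "loader = DedocFileLoader(\n",
  "    \"./example_data/state_of_the_union.txt\",\n",
  "    split=\"line\",\n",
  ")\n",
  "\n",
  "docs = loader.load()\n",
  "\n",
  "# Each returned document corresponds to a single line of the source text\n",
  "print(len(docs), docs[0].page_content[:50])"
 ]
},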
{
"cell_type": "markdown",
"id": "61e11769-4780-4f77-b10e-27db6936f226",
"metadata": {},
"source": [
"### Handling tables\n",
"\n",
"`DedocFileLoader` supports tables handling when `with_tables` parameter is \n",
"set to `True` during loader initialization (`with_tables=True` by default). \n",
"\n",
"Tables are not split - each table corresponds to one langchain `Document` object.\n",
"For tables, `Document` object has additional `metadata` fields `type=\"table\"` \n",
"and `text_as_html` with table `HTML` representation."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "bbeb2f8a-ac5e-4b59-8026-7ea3fc14c928",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"('table',\n",
" '<table border=\"1\" style=\"border-collapse: collapse; width: 100%;\">\\n<tbody>\\n<tr>\\n<td colspan=\"1\" rowspan=\"1\">Team</td>\\n<td colspan=\"1\" rowspan=\"1\"> &quot;Payroll (millions)&quot;</td>\\n<td colspan=\"1\" r')"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = DedocFileLoader(\"./example_data/mlb_teams_2012.csv\")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[1].metadata[\"type\"], docs[1].metadata[\"text_as_html\"][:200]"
]
},
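{
 "cell_type": "markdown",
 "id": "tables-html-parse-md",
 "metadata": {},
 "source": [
  "As a hedged follow-up (not part of the original notebook), the `text_as_html` metadata can be parsed back into a tabular structure, e.g. with `pandas.read_html`; this assumes `pandas` and an HTML parser such as `lxml` are installed."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "id": "tables-html-parse-code",
 "metadata": {},
 "outputs": [],
 "source": [
  "from io import StringIO\n",
  "\n",
  "import pandas as pd\n",
  "\n",
  "# Parse the HTML representation of the extracted table back into a DataFrame\n",
  "table_html = docs[1].metadata[\"text_as_html\"]\n",
  "df = pd.read_html(StringIO(table_html))[0]\n",
  "\n",
  "df.head()"
 ]
},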
{
"cell_type": "markdown",
"id": "b4a2b872-2aba-4e4c-8b2f-83a5a81ee1da",
"metadata": {},
"source": [
"### Handling attached files\n",
"\n",
"`DedocFileLoader` supports attached files handling when `with_attachments` is set \n",
"to `True` during loader initialization (`with_attachments=False` by default). \n",
"\n",
"Attachments are split according to the `split` parameter.\n",
"For attachments, langchain `Document` object has an additional metadata \n",
"field `type=\"attachment\"`."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "bb9d6c1c-e24c-4979-88a0-38d54abd6332",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"('attachment',\n",
" '\\nContent-Type\\nmultipart/mixed; boundary=\"0000000000005d654405f082adb7\"\\nDate\\nFri, 23 Dec 2022 12:08:48 -0600\\nFrom\\nMallori Harrell <mallori@unstructured.io>\\nMIME-Version\\n1.0\\nMessage-ID\\n<CAPgNNXSzLVJ-d1OCX_TjFgJU7ugtQrjFybPtAMmmYZzphxNFYg@mail.gmail.com>\\nSubject\\nFake email with attachment\\nTo\\nMallori Harrell <mallori@unstructured.io>')"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loader = DedocFileLoader(\n",
" \"./example_data/fake-email-attachment.eml\",\n",
" with_attachments=True,\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[1].metadata[\"type\"], docs[1].page_content"
]
},
{
"cell_type": "markdown",
"id": "d435c3f6-703a-4064-8307-ace140de967a",
"metadata": {},
"source": [
"## Loading PDF file\n",
"\n",
"If you want to handle only `PDF` documents, you can use `DedocPDFLoader` with only `PDF` support.\n",
"The loader supports the same parameters for document split, tables and attachments extraction.\n",
"\n",
"`Dedoc` can extract `PDF` with or without a textual layer, \n",
"as well as automatically detect its presence and correctness.\n",
"Several `PDF` handlers are available, you can use `pdf_with_text_layer` \n",
"parameter to choose one of them.\n",
"Please see [parameters description](https://dedoc.readthedocs.io/en/latest/parameters/pdf_handling.html) \n",
"to get more details.\n",
"\n",
"For `PDF` without a textual layer, `Tesseract OCR` and its language packages should be installed.\n",
"In this case, [the instruction](https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html) can be useful."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "0103a7f3-6b5e-4444-8f4d-83dd3724a9af",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n2\\n\\nZ. Shen et al.\\n\\n37], layout detection [38, 22], table detection [26], and scene text detection [4].\\n\\nA generalized learning-based framework dramatically reduces the need for the\\n\\nmanual specification of complicated rules, which is the status quo with traditional\\n\\nmethods. DL has the potential to transform DIA pipelines and benefit a broad\\n\\nspectrum of large-scale document digitization projects.\\n'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import DedocPDFLoader\n",
"\n",
"loader = DedocPDFLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", pdf_with_text_layer=\"true\", pages=\"2:2\"\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:400]"
]
},
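{
 "cell_type": "markdown",
 "id": "pdf-auto-example-md",
 "metadata": {},
 "source": [
  "Below is a hedged sketch (not part of the original notebook) of letting `Dedoc` detect the textual layer itself; it assumes the `auto` value of `pdf_with_text_layer` described in the parameters documentation linked above. If the `PDF` turns out to have no textual layer, `Tesseract OCR` must be installed as mentioned above."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "id": "pdf-auto-example-code",
 "metadata": {},
 "outputs": [],
 "source": [
  "# Hedged example: let Dedoc detect whether the PDF has a correct textual layer\n",
  "loader = DedocPDFLoader(\n",
  "    \"./example_data/layout-parser-paper.pdf\",\n",
  "    pdf_with_text_layer=\"auto\",\n",
  "    pages=\":2\",\n",
  ")\n",
  "\n",
  "docs = loader.load()\n",
  "\n",
  "docs[0].page_content[:100]"
 ]
},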
{
"cell_type": "markdown",
"id": "13061995-1805-40c2-a77a-a6cd80999e20",
"metadata": {},
"source": [
"## Dedoc API\n",
"\n",
"If you want to get up and running with less set up, you can use `Dedoc` as a service.\n",
"**`DedocAPIFileLoader` can be used without installation of `dedoc` library.**\n",
"The loader supports the same parameters as `DedocFileLoader` and\n",
"also automatically detects input file types.\n",
"\n",
"To use `DedocAPIFileLoader`, you should run the `Dedoc` service, e.g. `Docker` container (please see [the documentation](https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker) \n",
"for more details):\n",
"\n",
"```bash\n",
"docker pull dedocproject/dedoc\n",
"docker run -p 1231:1231\n",
"```\n",
"\n",
"Please do not use our demo URL `https://dedoc-readme.hf.space` in your code."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "211fc0b5-6080-4974-a6c1-f982bafd87d6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\nMadam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\n\\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\n\\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\n\\n\\nWith a duty to one another to the American people to '"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.document_loaders import DedocAPIFileLoader\n",
"\n",
"loader = DedocAPIFileLoader(\n",
" \"./example_data/state_of_the_union.txt\",\n",
" url=\"https://dedoc-readme.hf.space\",\n",
")\n",
"\n",
"docs = loader.load()\n",
"\n",
"docs[0].page_content[:400]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "faaff475-5209-436f-bcde-97d58daed05c",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.19"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -162,7 +162,7 @@
"metadata": {},
"outputs": [],
"source": [
"!poetry run pip install --upgrade langchain-openai tiktoken langchain-chroma hnswlib"
"!poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib"
]
},
{
@@ -211,7 +211,7 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"\n",
"embedding = OpenAIEmbeddings()\n",
@@ -365,7 +365,7 @@
"source": [
"from langchain.chains.query_constructor.schema import AttributeInfo\n",
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"\n",
"EXCLUDE_KEYS = [\"id\", \"xpath\", \"structure\"]\n",
"metadata_field_info = [\n",
@@ -540,7 +540,7 @@
"source": [
"from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# The vectorstore to use to index the child chunks\n",

View File

@@ -37,7 +37,7 @@
"scrapfly_loader = ScrapflyLoader(\n",
" [\"https://web-scraping.dev/products\"],\n",
" api_key=\"Your ScrapFly API key\", # Get your API key from https://www.scrapfly.io/\n",
" continue_on_failure=True, # Ignore unprocessable web pages and log their exceptions\n",
" ignore_scrape_failures=True, # Ignore unprocessable web pages and log their exceptions\n",
")\n",
"\n",
"# Load documents from URLs as markdown\n",
@@ -72,7 +72,7 @@
"scrapfly_loader = ScrapflyLoader(\n",
" [\"https://web-scraping.dev/products\"],\n",
" api_key=\"Your ScrapFly API key\", # Get your API key from https://www.scrapfly.io/\n",
" continue_on_failure=True, # Ignore unprocessable web pages and log their exceptions\n",
" ignore_scrape_failures=True, # Ignore unprocessable web pages and log their exceptions\n",
" scrape_config=scrapfly_scrape_config, # Pass the scrape_config object\n",
" scrape_format=\"markdown\", # The scrape result format, either `markdown`(default) or `text`\n",
")\n",

View File

@@ -316,7 +316,7 @@
"id": "eb00a625-a6c9-4766-b3f0-eaed024851c9",
"metadata": {},
"source": [
"## Return SPARQL query\n",
"## Return SQARQL query\n",
"You can return the SPARQL query step from the Sparql QA Chain using the `return_sparql_query` parameter"
]
},
@@ -358,7 +358,7 @@
"\u001b[32;1m\u001b[1;3m[]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"SPARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
"SQARQL query: PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n",
"SELECT ?workHomepage\n",
"WHERE {\n",
" ?person foaf:name \"Tim Berners-Lee\" .\n",
@@ -370,7 +370,7 @@
],
"source": [
"result = chain(\"What is Tim Berners-Lee's work homepage?\")\n",
"print(f\"SPARQL query: {result['sparql_query']}\")\n",
"print(f\"SQARQL query: {result['sparql_query']}\")\n",
"print(f\"Final answer: {result['result']}\")"
]
},

View File

@@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 9,
"id": "10ad9224",
"metadata": {
"ExecuteTime": {
@@ -1809,6 +1809,7 @@
"cell_type": "markdown",
"id": "0c69d84d",
"metadata": {
"jp-MarkdownHeadingCollapsed": true,
"tags": []
},
"source": [
@@ -1890,6 +1891,7 @@
"cell_type": "markdown",
"id": "5da41b77",
"metadata": {
"jp-MarkdownHeadingCollapsed": true,
"tags": []
},
"source": [
@@ -2147,7 +2149,6 @@
},
{
"cell_type": "markdown",
"id": "2ac1a8c7",
"metadata": {},
"source": [
"## SingleStoreDB Semantic Cache\n",
@@ -2172,353 +2173,6 @@
")"
]
},
{
"cell_type": "markdown",
"id": "7019c991-0101-4f9c-b212-5729a5471293",
"metadata": {},
"source": [
"## Couchbase Caches\n",
"\n",
"Use [Couchbase](https://couchbase.com/) as a cache for prompts and responses."
]
},
{
"cell_type": "markdown",
"id": "d6aac680-ba32-4c19-8864-6471cf0e7d5a",
"metadata": {},
"source": [
"### Couchbase Cache\n",
"\n",
"The standard cache that looks for an exact match of the user prompt."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9b4764e4-c75f-4185-b326-524287a826be",
"metadata": {},
"outputs": [],
"source": [
"# Create couchbase connection object\n",
"from datetime import timedelta\n",
"\n",
"from couchbase.auth import PasswordAuthenticator\n",
"from couchbase.cluster import Cluster\n",
"from couchbase.options import ClusterOptions\n",
"from langchain_couchbase.cache import CouchbaseCache\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"COUCHBASE_CONNECTION_STRING = (\n",
" \"couchbase://localhost\" # or \"couchbases://localhost\" if using TLS\n",
")\n",
"DB_USERNAME = \"Administrator\"\n",
"DB_PASSWORD = \"Password\"\n",
"\n",
"auth = PasswordAuthenticator(DB_USERNAME, DB_PASSWORD)\n",
"options = ClusterOptions(auth)\n",
"cluster = Cluster(COUCHBASE_CONNECTION_STRING, options)\n",
"\n",
"# Wait until the cluster is ready for use.\n",
"cluster.wait_until_ready(timedelta(seconds=5))"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "4b5e73c5-92c1-4eab-84e2-77924ea9c123",
"metadata": {},
"outputs": [],
"source": [
"# Specify the bucket, scope and collection to store the cached documents\n",
"BUCKET_NAME = \"langchain-testing\"\n",
"SCOPE_NAME = \"_default\"\n",
"COLLECTION_NAME = \"_default\"\n",
"\n",
"set_llm_cache(\n",
" CouchbaseCache(\n",
" cluster=cluster,\n",
" bucket_name=BUCKET_NAME,\n",
" scope_name=SCOPE_NAME,\n",
" collection_name=COLLECTION_NAME,\n",
" )\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "db8d28cc-8d93-47b4-8326-57a29a06fb3c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 22.2 ms, sys: 14 ms, total: 36.2 ms\n",
"Wall time: 938 ms\n"
]
},
{
"data": {
"text/plain": [
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"# The first time, it is not yet in the cache, so it should take longer\n",
"llm.invoke(\"Tell me a joke\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "b470dc81-2e7f-4743-9435-ce9071394eea",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 53 ms, sys: 29 ms, total: 82 ms\n",
"Wall time: 84.2 ms\n"
]
},
{
"data": {
"text/plain": [
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was two-tired!\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"# The second time, it is in the cache, so it should be much faster\n",
"llm.invoke(\"Tell me a joke\")"
]
},
{
"cell_type": "markdown",
"id": "43626f33-d184-4260-b641-c9341cef5842",
"metadata": {},
"source": [
"### Couchbase Semantic Cache\n",
"Semantic caching allows users to retrieve cached prompts based on semantic similarity between the user input and previously cached inputs. Under the hood it uses Couchbase as both a cache and a vectorstore. This needs an appropriate Vector Search Index defined to work. Please look at the usage example on how to set up the index."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "6b470c03-d7fe-4270-89e1-638251619a53",
"metadata": {},
"outputs": [],
"source": [
"# Create Couchbase connection object\n",
"from datetime import timedelta\n",
"\n",
"from couchbase.auth import PasswordAuthenticator\n",
"from couchbase.cluster import Cluster\n",
"from couchbase.options import ClusterOptions\n",
"from langchain_couchbase.cache import CouchbaseSemanticCache\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"COUCHBASE_CONNECTION_STRING = (\n",
" \"couchbase://localhost\" # or \"couchbases://localhost\" if using TLS\n",
")\n",
"DB_USERNAME = \"Administrator\"\n",
"DB_PASSWORD = \"Password\"\n",
"\n",
"auth = PasswordAuthenticator(DB_USERNAME, DB_PASSWORD)\n",
"options = ClusterOptions(auth)\n",
"cluster = Cluster(COUCHBASE_CONNECTION_STRING, options)\n",
"\n",
"# Wait until the cluster is ready for use.\n",
"cluster.wait_until_ready(timedelta(seconds=5))"
]
},
{
"cell_type": "markdown",
"id": "f831bc4c-f330-4bd7-9b80-76771d91827e",
"metadata": {},
"source": [
"Notes:\n",
"- The search index for the semantic cache needs to be defined before using the semantic cache. \n",
"- The optional parameter, `score_threshold` in the Semantic Cache that you can use to tune the results of the semantic search.\n",
"\n",
"### How to Import an Index to the Full Text Search service?\n",
" - [Couchbase Server](https://docs.couchbase.com/server/current/search/import-search-index.html)\n",
" - Click on Search -> Add Index -> Import\n",
" - Copy the following Index definition in the Import screen\n",
" - Click on Create Index to create the index.\n",
" - [Couchbase Capella](https://docs.couchbase.com/cloud/search/import-search-index.html)\n",
" - Copy the index definition to a new file `index.json`\n",
" - Import the file in Capella using the instructions in the documentation.\n",
" - Click on Create Index to create the index.\n",
"\n",
"#### Example index for the vector search. \n",
" ```\n",
" {\n",
" \"type\": \"fulltext-index\",\n",
" \"name\": \"langchain-testing._default.semantic-cache-index\",\n",
" \"sourceType\": \"gocbcore\",\n",
" \"sourceName\": \"langchain-testing\",\n",
" \"planParams\": {\n",
" \"maxPartitionsPerPIndex\": 1024,\n",
" \"indexPartitions\": 16\n",
" },\n",
" \"params\": {\n",
" \"doc_config\": {\n",
" \"docid_prefix_delim\": \"\",\n",
" \"docid_regexp\": \"\",\n",
" \"mode\": \"scope.collection.type_field\",\n",
" \"type_field\": \"type\"\n",
" },\n",
" \"mapping\": {\n",
" \"analysis\": {},\n",
" \"default_analyzer\": \"standard\",\n",
" \"default_datetime_parser\": \"dateTimeOptional\",\n",
" \"default_field\": \"_all\",\n",
" \"default_mapping\": {\n",
" \"dynamic\": true,\n",
" \"enabled\": false\n",
" },\n",
" \"default_type\": \"_default\",\n",
" \"docvalues_dynamic\": false,\n",
" \"index_dynamic\": true,\n",
" \"store_dynamic\": true,\n",
" \"type_field\": \"_type\",\n",
" \"types\": {\n",
" \"_default.semantic-cache\": {\n",
" \"dynamic\": false,\n",
" \"enabled\": true,\n",
" \"properties\": {\n",
" \"embedding\": {\n",
" \"dynamic\": false,\n",
" \"enabled\": true,\n",
" \"fields\": [\n",
" {\n",
" \"dims\": 1536,\n",
" \"index\": true,\n",
" \"name\": \"embedding\",\n",
" \"similarity\": \"dot_product\",\n",
" \"type\": \"vector\",\n",
" \"vector_index_optimized_for\": \"recall\"\n",
" }\n",
" ]\n",
" },\n",
" \"metadata\": {\n",
" \"dynamic\": true,\n",
" \"enabled\": true\n",
" },\n",
" \"text\": {\n",
" \"dynamic\": false,\n",
" \"enabled\": true,\n",
" \"fields\": [\n",
" {\n",
" \"index\": true,\n",
" \"name\": \"text\",\n",
" \"store\": true,\n",
" \"type\": \"text\"\n",
" }\n",
" ]\n",
" }\n",
" }\n",
" }\n",
" }\n",
" },\n",
" \"store\": {\n",
" \"indexType\": \"scorch\",\n",
" \"segmentVersion\": 16\n",
" }\n",
" },\n",
" \"sourceParams\": {}\n",
" }\n",
" ```"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "ae0766c8-ea34-4604-b0dc-cf2bbe8077f4",
"metadata": {},
"outputs": [],
"source": [
"BUCKET_NAME = \"langchain-testing\"\n",
"SCOPE_NAME = \"_default\"\n",
"COLLECTION_NAME = \"semantic-cache\"\n",
"INDEX_NAME = \"semantic-cache-index\"\n",
"embeddings = OpenAIEmbeddings()\n",
"\n",
"cache = CouchbaseSemanticCache(\n",
" cluster=cluster,\n",
" embedding=embeddings,\n",
" bucket_name=BUCKET_NAME,\n",
" scope_name=SCOPE_NAME,\n",
" collection_name=COLLECTION_NAME,\n",
" index_name=INDEX_NAME,\n",
" score_threshold=0.8,\n",
")\n",
"\n",
"set_llm_cache(cache)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "a2e82743-10ea-4319-b43e-193475ae5449",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"The average lifespan of a dog is around 12 years, but this can vary depending on the breed, size, and overall health of the individual dog. Some smaller breeds may live longer, while larger breeds may have shorter lifespans. Proper care, diet, and exercise can also play a role in extending a dog's lifespan.\n",
"CPU times: user 826 ms, sys: 2.46 s, total: 3.28 s\n",
"Wall time: 2.87 s\n"
]
}
],
"source": [
"%%time\n",
"# The first time, it is not yet in the cache, so it should take longer\n",
"print(llm.invoke(\"How long do dogs live?\"))"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "c36f4e29-d872-4334-a1f1-0e6d10c5d9f2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"The average lifespan of a dog is around 12 years, but this can vary depending on the breed, size, and overall health of the individual dog. Some smaller breeds may live longer, while larger breeds may have shorter lifespans. Proper care, diet, and exercise can also play a role in extending a dog's lifespan.\n",
"CPU times: user 9.82 ms, sys: 2.61 ms, total: 12.4 ms\n",
"Wall time: 311 ms\n"
]
}
],
"source": [
"%%time\n",
"# The second time, it is in the cache, so it should be much faster\n",
"print(llm.invoke(\"What is the expected lifespan of a dog?\"))"
]
},
{
"cell_type": "markdown",
"id": "ae1f5e1c-085e-4998-9f2d-b5867d2c3d5b",
@@ -2574,9 +2228,7 @@
"| langchain_core.caches | [InMemoryCache](https://api.python.langchain.com/en/latest/caches/langchain_core.caches.InMemoryCache.html) |\n",
"| langchain_elasticsearch.cache | [ElasticsearchCache](https://api.python.langchain.com/en/latest/cache/langchain_elasticsearch.cache.ElasticsearchCache.html) |\n",
"| langchain_mongodb.cache | [MongoDBAtlasSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_mongodb.cache.MongoDBAtlasSemanticCache.html) |\n",
"| langchain_mongodb.cache | [MongoDBCache](https://api.python.langchain.com/en/latest/cache/langchain_mongodb.cache.MongoDBCache.html) |\n",
"| langchain_couchbase.cache | [CouchbaseCache](https://api.python.langchain.com/en/latest/cache/langchain_couchbase.cache.CouchbaseCache.html) |\n",
"| langchain_couchbase.cache | [CouchbaseSemanticCache](https://api.python.langchain.com/en/latest/cache/langchain_couchbase.cache.CouchbaseSemanticCache.html) |\n"
"| langchain_mongodb.cache | [MongoDBCache](https://api.python.langchain.com/en/latest/cache/langchain_mongodb.cache.MongoDBCache.html) |\n"
]
},
{
@@ -2604,7 +2256,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.10.12"
}
},
"nbformat": 4,

View File

@@ -14,13 +14,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {
"tags": []
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install --upgrade --quiet langchain-community gpt4all"
"%pip install --upgrade --quiet gpt4all > /dev/null"
]
},
{
@@ -39,7 +47,9 @@
},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain_community.llms import GPT4All\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate"
]
},
@@ -82,71 +92,7 @@
"\n",
"For more info, visit https://github.com/nomic-ai/gpt4all.\n",
"\n",
"---\n",
"\n",
"This integration does not yet support streaming in chunks via the [`.stream()`](https://python.langchain.com/v0.2/docs/how_to/streaming/) method. The below example uses a callback handler with `streaming=True`:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"local_path = (\n",
" \"./models/Meta-Llama-3-8B-Instruct.Q4_0.gguf\" # replace with your local file path\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Token: Justin\n",
"Token: Bieber\n",
"Token: was\n",
"Token: born\n",
"Token: on\n",
"Token: March\n",
"Token: \n",
"Token: 1\n",
"Token: ,\n",
"Token: \n"
]
}
],
"source": [
"from langchain_core.callbacks import BaseCallbackHandler\n",
"\n",
"count = 0\n",
"\n",
"\n",
"class MyCustomHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" global count\n",
" if count < 10:\n",
" print(f\"Token: {token}\")\n",
" count += 1\n",
"\n",
"\n",
"# Verbose is required to pass to the callback manager\n",
"llm = GPT4All(model=local_path, callbacks=[MyCustomHandler()], streaming=True)\n",
"\n",
"# If you want to use a custom model add the backend parameter\n",
"# Check https://docs.gpt4all.io/gpt4all_python.html for supported backends\n",
"# llm = GPT4All(model=local_path, backend=\"gptj\", callbacks=callbacks, streaming=True)\n",
"\n",
"chain = prompt | llm\n",
"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
"\n",
"# Streamed tokens will be logged/aggregated via the passed callback\n",
"res = chain.invoke({\"question\": question})"
"---"
]
},
{
@@ -154,7 +100,56 @@
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
"source": [
"local_path = (\n",
" \"./models/ggml-gpt4all-l13b-snoozy.bin\" # replace with your desired local file path\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Callbacks support token-wise streaming\n",
"callbacks = [StreamingStdOutCallbackHandler()]\n",
"\n",
"# Verbose is required to pass to the callback manager\n",
"llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)\n",
"\n",
"# If you want to use a custom model add the backend parameter\n",
"# Check https://docs.gpt4all.io/gpt4all_python.html for supported backends\n",
"llm = GPT4All(model=local_path, backend=\"gptj\", callbacks=callbacks, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
"\n",
"llm_chain.run(question)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Justin Bieber was born on March 1, 1994. In 1994, The Cowboys won Super Bowl XXVIII."
]
}
],
"metadata": {

View File

@@ -33,7 +33,7 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet transformers"
"%pip install --upgrade --quiet transformers --quiet"
]
},
{
@@ -143,25 +143,6 @@
"print(chain.invoke({\"question\": question}))"
]
},
{
"cell_type": "markdown",
"id": "5141dc4d",
"metadata": {},
"source": [
"Streaming repsonse."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1819250-2db9-4143-b88a-12e92d4e2386",
"metadata": {},
"outputs": [],
"source": [
"for chunk in chain.stream(question):\n",
" print(chunk, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "dbbc3a37",

View File

@@ -1,21 +1,10 @@
{
"cells": [
{
"cell_type": "raw",
"id": "67db2992",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Ollama\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "9597802c",
"metadata": {},
"source": [
"# OllamaLLM\n",
"# Ollama\n",
"\n",
":::caution\n",
"You are currently on a page documenting the use of Ollama models as [text completion models](/docs/concepts/#llms). Many popular Ollama models are [chat completion models](/docs/concepts/#chat-models).\n",
@@ -23,35 +12,21 @@
"You may be looking for [this page instead](/docs/integrations/chat/ollama/).\n",
":::\n",
"\n",
"This page goes over how to use LangChain to interact with `Ollama` models.\n",
"[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.\n",
"\n",
"Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. \n",
"\n",
"It optimizes setup and configuration details, including GPU usage.\n",
"\n",
"For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/ollama/ollama#model-library).\n",
"\n",
"## Installation"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59c710c4",
"metadata": {},
"outputs": [],
"source": [
"# install package\n",
"%pip install -U langchain-ollama"
]
},
{
"cell_type": "markdown",
"id": "0ee90032",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:\n",
"First, follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance:\n",
"\n",
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
" * e.g., `ollama pull llama3`\n",
" * View a list of available models via the [model library](https://ollama.ai/library) and pull to use locally with the command `ollama pull llama3`\n",
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
"\n",
"> On Mac, the models will be download to `~/.ollama/models`\n",
@@ -59,67 +34,194 @@
"> On Linux (or WSL), the models will be stored at `/usr/share/ollama/.ollama/models`\n",
"\n",
"* Specify the exact version of the model of interest as such `ollama pull vicuna:13b-v1.5-16k-q4_0` (View the [various tags for the `Vicuna`](https://ollama.ai/library/vicuna/tags) model in this instance)\n",
"* To view all pulled models, use `ollama list`\n",
"* To view all pulled models on your local instance, use `ollama list`\n",
"* To chat directly with a model from the command line, use `ollama run <name-of-model>`\n",
"* View the [Ollama documentation](https://github.com/jmorganca/ollama) for more commands. Run `ollama help` in the terminal to see available commands too.\n",
"* View the [Ollama documentation](https://github.com/ollama/ollama) for more commands. \n",
"* Run `ollama help` in the terminal to see available commands too.\n",
"\n",
"## Usage"
"## Usage\n",
"\n",
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.ollama.Ollama.html).\n",
"\n",
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` [interface](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/).\n",
"\n",
"This includes [special tokens](https://ollama.com/library/llama3) for system message and user input.\n",
"\n",
"## Interacting with Models \n",
"\n",
"Here are a few ways to interact with pulled local models\n",
"\n",
"#### In the terminal:\n",
"\n",
"* All of your local models are automatically served on `localhost:11434`\n",
"* Run `ollama run <name-of-model>` to start interacting via the command line directly\n",
"\n",
"#### Via the API\n",
"\n",
"Send an `application/json` request to the API endpoint of Ollama to interact.\n",
"\n",
"```bash\n",
"curl http://localhost:11434/api/generate -d '{\n",
" \"model\": \"llama3\",\n",
" \"prompt\":\"Why is the sky blue?\"\n",
"}'\n",
"```\n",
"\n",
"See the Ollama [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md) for all endpoints.\n",
"\n",
"#### via LangChain\n",
"\n",
"See a typical basic example of using [Ollama chat model](https://python.langchain.com/v0.2/docs/integrations/chat/ollama/) in your LangChain application."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "035dea0f",
"metadata": {
"tags": []
},
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain-community"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'A great start!\\n\\nLangChain is a type of AI model that uses language processing techniques to generate human-like text based on input prompts or chains of reasoning. In other words, it can have a conversation with humans, understanding the context and responding accordingly.\\n\\nHere\\'s a possible breakdown:\\n\\n* \"Lang\" likely refers to its focus on natural language processing (NLP) and linguistic analysis.\\n* \"Chain\" suggests that LangChain is designed to generate text in response to a series of connected ideas or prompts, rather than simply generating random text.\\n\\nSo, what do you think LangChain\\'s capabilities might be?'"
"\"Here's one:\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\\n\\nHope that made you smile! Do you want to hear another one?\""
]
},
"execution_count": 4,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_ollama.llms import OllamaLLM\n",
"from langchain_community.llms import Ollama\n",
"\n",
"template = \"\"\"Question: {question}\n",
"llm = Ollama(\n",
" model=\"llama3\"\n",
") # assuming you have Ollama installed and have llama3 model pulled with `ollama pull llama3 `\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"model = OllamaLLM(model=\"llama3\")\n",
"\n",
"chain = prompt | model\n",
"\n",
"chain.invoke({\"question\": \"What is LangChain?\"})"
"llm.invoke(\"Tell me a joke\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To stream tokens, use the `.stream(...)` method:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"S\n",
"ure\n",
",\n",
" here\n",
"'\n",
"s\n",
" one\n",
":\n",
"\n",
"\n",
"\n",
"\n",
"Why\n",
" don\n",
"'\n",
"t\n",
" scient\n",
"ists\n",
" trust\n",
" atoms\n",
"?\n",
"\n",
"\n",
"B\n",
"ecause\n",
" they\n",
" make\n",
" up\n",
" everything\n",
"!\n",
"\n",
"\n",
"\n",
"\n",
"I\n",
" hope\n",
" you\n",
" found\n",
" that\n",
" am\n",
"using\n",
"!\n",
" Do\n",
" you\n",
" want\n",
" to\n",
" hear\n",
" another\n",
" one\n",
"?\n",
"\n"
]
}
],
"source": [
"query = \"Tell me a joke\"\n",
"\n",
"for chunks in llm.stream(query):\n",
" print(chunks)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To learn more about the LangChain Expressive Language and the available methods on an LLM, see the [LCEL Interface](/docs/concepts#interface)"
]
},
{
"cell_type": "markdown",
"id": "e2d85456",
"metadata": {},
"source": [
"## Multi-modal\n",
"\n",
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.com/library/bakllava) and [llava](https://ollama.com/library/llava).\n",
"Ollama has support for multi-modal LLMs, such as [bakllava](https://ollama.ai/library/bakllava) and [llava](https://ollama.ai/library/llava).\n",
"\n",
" ollama pull bakllava\n",
"`ollama pull bakllava`\n",
"\n",
"Be sure to update Ollama so that you have the most recent version to support multi-modal."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import Ollama\n",
"\n",
"bakllava = Ollama(model=\"bakllava\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4043e202",
"metadata": {},
"outputs": [
{
@@ -177,8 +279,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "79aaf863",
"execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -187,24 +288,38 @@
"'90%'"
]
},
"execution_count": 4,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_ollama import OllamaLLM\n",
"\n",
"llm = OllamaLLM(model=\"bakllava\")\n",
"\n",
"llm_with_image_context = llm.bind(images=[image_b64])\n",
"llm_with_image_context = bakllava.bind(images=[image_b64])\n",
"llm_with_image_context.invoke(\"What is the dollar based gross retention rate:\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Concurrency Features\n",
"\n",
"Ollama supports concurrency inference for a single model, and or loading multiple models simulatenously (at least [version 0.1.33](https://github.com/ollama/ollama/releases)).\n",
"\n",
"Start the Ollama server with:\n",
"\n",
"* `OLLAMA_NUM_PARALLEL`: Handle multiple requests simultaneously for a single model\n",
"* `OLLAMA_MAX_LOADED_MODELS`: Load multiple models simultaneously\n",
"\n",
"Example: `OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=4 ollama serve`\n",
"\n",
"Learn more about configuring Ollama server in [the official guide](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-configure-ollama-server)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.11.1 64-bit",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -218,14 +333,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
},
"vscode": {
"interpreter": {
"hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
}
"version": "3.11.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
"nbformat_minor": 4
}

View File

@@ -245,7 +245,7 @@
"source": [
"### Streaming\n",
"\n",
"You can use `stream` method to get a streaming of LLM output, "
"To get streaming of LLM output, you can create a Huggingface `TextIteratorStreamer` for `_forward_params`."
]
},
{
@@ -255,11 +255,24 @@
"metadata": {},
"outputs": [],
"source": [
"generation_config = {\"skip_prompt\": True, \"pipeline_kwargs\": {\"max_new_tokens\": 100}}\n",
"chain = prompt | ov_llm.bind(**generation_config)\n",
"from threading import Thread\n",
"\n",
"for chunk in chain.stream(question):\n",
" print(chunk, end=\"\", flush=True)"
"from transformers import TextIteratorStreamer\n",
"\n",
"streamer = TextIteratorStreamer(\n",
" ov_llm.pipeline.tokenizer,\n",
" timeout=30.0,\n",
" skip_prompt=True,\n",
" skip_special_tokens=True,\n",
")\n",
"pipeline_kwargs = {\"pipeline_kwargs\": {\"streamer\": streamer, \"max_new_tokens\": 100}}\n",
"chain = prompt | ov_llm.bind(**pipeline_kwargs)\n",
"\n",
"t1 = Thread(target=chain.invoke, args=({\"question\": question},))\n",
"t1.start()\n",
"\n",
"for new_text in streamer:\n",
" print(new_text, end=\"\", flush=True)"
]
},
{

View File

@@ -50,8 +50,8 @@
"source": [
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.llms import PipelineAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import PromptTemplate"
]
},
@@ -123,7 +123,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm_chain = prompt | llm | StrOutputParser()"
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
@@ -142,7 +142,7 @@
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n",
"llm_chain.invoke(question)"
"llm_chain.run(question)"
]
}
],

View File

@@ -88,7 +88,6 @@
" \"max_tokens_to_generate\": 1000,\n",
" \"temperature\": 0.01,\n",
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
" \"process_prompt\": False,\n",
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
@@ -117,7 +116,6 @@
" \"max_tokens_to_generate\": 1000,\n",
" \"temperature\": 0.01,\n",
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
" \"process_prompt\": False,\n",
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
@@ -177,7 +175,9 @@
"import os\n",
"\n",
"sambastudio_base_url = \"<Your SambaStudio environment URL>\"\n",
"sambastudio_base_uri = \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/generic\" set as default\n",
"sambastudio_base_uri = (\n",
" \"<Your SambaStudio endpoint base URI>\" # optional, \"api/predict/nlp\" set as default\n",
")\n",
"sambastudio_project_id = \"<Your SambaStudio project id>\"\n",
"sambastudio_endpoint_id = \"<Your SambaStudio endpoint id>\"\n",
"sambastudio_api_key = \"<Your SambaStudio endpoint API key>\"\n",
@@ -271,7 +271,6 @@
" \"do_sample\": True,\n",
" \"max_tokens_to_generate\": 1000,\n",
" \"temperature\": 0.01,\n",
" \"process_prompt\": False,\n",
" \"select_expert\": \"Meta-Llama-3-8B-Instruct\",\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",

View File

@@ -27,7 +27,7 @@
"outputs": [],
"source": [
"# Install the package\n",
"%pip install --upgrade --quiet langchain-community dashscope"
"%pip install --upgrade --quiet dashscope"
]
},
{

Some files were not shown because too many files have changed in this diff.